diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 050c43ef6..20cd14454 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -39,7 +39,7 @@ jobs:
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml
index db6bc6ee8..4bd14c2ae 100644
--- a/.github/workflows/commitlint.yml
+++ b/.github/workflows/commitlint.yml
@@ -5,7 +5,7 @@ jobs:
   commitlint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: wagoid/commitlint-github-action@v5
\ No newline at end of file
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 454c5b15a..1d8387256 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -1,7 +1,7 @@
 name: Build, Test, Lint License

 env:
-  GO_VERSION: "1.21.0"
+  GO_VERSION: "1.21.1"

 on:
   push:
@@ -17,7 +17,7 @@ jobs:
   govulncheck:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-go@v4
         with:
           go-version: ${{ env.GO_VERSION }}
@@ -27,7 +27,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: Set up Go
         uses: actions/setup-go@v4
@@ -48,7 +48,7 @@ jobs:
     if: '!github.event.deleted'
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Install Go
         uses: actions/setup-go@v4
         with:
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
index 09861e044..5b1e2954c 100644
--- a/.github/workflows/golangci-lint.yml
+++ b/.github/workflows/golangci-lint.yml
@@ -19,8 +19,8 @@ jobs:
     steps:
       - uses: actions/setup-go@v4
         with:
-          go-version: '1.21.0'
-      - uses: actions/checkout@v3
+          go-version: '1.21.1'
+      - uses: actions/checkout@v4
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v3
         with:
diff --git a/Makefile b/Makefile
index 4f322cb18..81dc22aaa 100644
--- a/Makefile
+++ b/Makefile
@@ -167,8 +167,8 @@ ifeq ($(origin ci),undefined)
	@echo make ci=/path/to/harvest.yml ci-local
	@exit 1
 endif
-	-@docker stop $$(docker ps -aq) 2>/dev/null || true
-	-@docker rm $$(docker ps -aq) 2>/dev/null || true
+	-@docker stop $$(docker ps -a --format '{{.ID}} {{.Names}}' | grep -E 'grafana|prometheus|poller') 2>/dev/null || true
+	-@docker rm $$(docker ps -a --format '{{.ID}} {{.Names}}' | grep -E 'grafana|prometheus|poller') 2>/dev/null || true
	-@docker volume rm harvest_grafana_data harvest_prometheus_data 2>/dev/null || true
	@if [ "$(ci)" != "harvest.yml" ]; then cp $(ci) harvest.yml; else echo "Source and destination files are the same, skipping copy"; fi
	@./bin/harvest generate docker full --port --output harvest-compose.yml
diff --git a/cmd/admin/admin.go b/cmd/admin/admin.go
index e1d976c6e..d8eed9f0d 100644
--- a/cmd/admin/admin.go
+++ b/cmd/admin/admin.go
@@ -223,7 +223,7 @@ func doTLS(_ *cobra.Command, _ []string) {

 func doAdmin(c *cobra.Command, _ []string) {
 	var configPath = c.Root().PersistentFlags().Lookup("config").Value.String()
-	err := conf.LoadHarvestConfig(configPath)
+	_, err := conf.LoadHarvestConfig(configPath)
 	if err != nil {
 		return
 	}
diff --git a/cmd/collectors/collectorstest.go b/cmd/collectors/collectorstest.go
index ad432c0db..9e9b073aa 100644
--- a/cmd/collectors/collectorstest.go
+++ b/cmd/collectors/collectorstest.go
@@ -50,13 +50,11 @@ func JSONToGson(path string, flatten bool) []gjson.Result {
 		return nil
 	}
 	bb := b.Bytes()
-	output := gjson.GetManyBytes(bb, "records", "num_records", "_links.next.href")
+	output := gjson.ParseBytes(bb)
+	data := output.Get("records")
+	numRecords := output.Get("num_records")
-	data := output[0]
-	numRecords := output[1]
-	isNonIterRestCall := !data.Exists()
-
-	if isNonIterRestCall {
+	if !data.Exists() {
 		contentJSON := `{"records":[]}`
 		response, err := sjson.SetRawBytes([]byte(contentJSON), "records.-1", bb)
 		if err != nil {
diff --git a/cmd/collectors/ems/ems_test.go b/cmd/collectors/ems/ems_test.go
index b66836850..50af08a01 100644
--- a/cmd/collectors/ems/ems_test.go
+++ b/cmd/collectors/ems/ems_test.go
@@ -43,18 +43,17 @@ func BookendEmsTest(t *testing.T, e *Ems) {
 }

 func NewEms() *Ems {
-	// homepath is harvest directory level
+	// homePath is harvest directory level
 	homePath := "../../../"
-	emsConfgPath := homePath + "conf/ems/default.yaml"
-	emsPoller := "testEms"
+	emsConfigPath := homePath + "conf/ems/default.yaml"

 	conf.TestLoadHarvestConfig("testdata/config.yml")
-	opts := options.Options{
-		Poller:   emsPoller,
-		HomePath: homePath,
-		IsTest:   true,
-	}
-	ac := collector.New("Ems", "Ems", &opts, emsParams(emsConfgPath), nil)
+	opts := options.New(options.WithConfPath(homePath + "conf"))
+	opts.Poller = "testEms"
+	opts.HomePath = homePath
+	opts.IsTest = true
+
+	ac := collector.New("Ems", "Ems", opts, emsParams(emsConfigPath), nil)
 	e := &Ems{}
 	if err := e.Init(ac); err != nil {
 		log.Fatal().Err(err).Send()
diff --git a/cmd/collectors/power.go b/cmd/collectors/power.go
new file mode 100644
index 000000000..30728672f
--- /dev/null
+++ b/cmd/collectors/power.go
@@ -0,0 +1,444 @@
+package collectors
+
+import (
+	"fmt"
+	"github.com/netapp/harvest/v2/cmd/poller/plugin"
+	"github.com/netapp/harvest/v2/cmd/tools/rest"
+	"github.com/netapp/harvest/v2/pkg/conf"
+	"github.com/netapp/harvest/v2/pkg/logging"
+	"github.com/netapp/harvest/v2/pkg/matrix"
+	"github.com/netapp/harvest/v2/pkg/util"
+	"regexp"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	zapiValueKey = "environment-sensors-info.threshold-sensor-value"
+	restValueKey = "value"
+)
+
+// collectChassisFRU is here because both the ZAPI and REST sensor.go plugins call it to collect
+// `system chassis fru show`.
+// Chassis FRU information is only available via the private CLI.
+func collectChassisFRU(client *rest.Client, logger *logging.Logger) (map[string]int, error) {
+	fields := "fru-name,type,status,connected-nodes,num-nodes"
+	query := "api/private/cli/system/chassis/fru"
+	filter := []string{"type=psu"}
+	href := rest.BuildHref("", fields, filter, "", "", "", "", query)
+
+	result, err := rest.Fetch(client, href)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch data href=%s err=%w", href, err)
+	}
+
+	// map of PSUs node -> numNode
+	nodeToNumNode := make(map[string]int)
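+	// Hypothetical example (values assumed, not taken from a real cluster):
+	// a chassis PSU FRU connected to an HA pair would add entries like
+	// {"node-01": 2, "node-02": 2}, i.e. each node's PSUs are shared by 2 nodes.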
+
+	for _, r := range result {
+		cn := r.Get("connected_nodes")
+		if !cn.Exists() {
+			logger.Warn().
+				Str("cluster", client.Cluster().Name).
+				Str("fru", r.Get("fru_name").String()).
+				Msg("fru has no connected nodes")
+			continue
+		}
+		numNodes := int(r.Get("num_nodes").Int())
+		for _, e := range cn.Array() {
+			nodeToNumNode[e.String()] = numNodes
+		}
+	}
+	return nodeToNumNode, nil
+}
+
+type sensorValue struct {
+	node  string
+	name  string
+	value float64
+	unit  string
+}
+
+type environmentMetric struct {
+	key                   string
+	ambientTemperature    []float64
+	nonAmbientTemperature []float64
+	fanSpeed              []float64
+	powerSensor           map[string]*sensorValue
+	voltageSensor         map[string]*sensorValue
+	currentSensor         map[string]*sensorValue
+}
+
+var ambientRegex = regexp.MustCompile(`^(Ambient Temp|Ambient Temp \d|PSU\d AmbTemp|PSU\d Inlet|PSU\d Inlet Temp|In Flow Temp|Front Temp|Bat_Ambient \d|Riser Inlet Temp)$`)
+
+var powerInRegex = regexp.MustCompile(`^PSU\d (InPwr Monitor|InPower|PIN|Power In)$`)
+
+var voltageRegex = regexp.MustCompile(`^PSU\d (\d+V|InVoltage|VIN|AC In Volt)$`)
+
+var CurrentRegex = regexp.MustCompile(`^PSU\d (\d+V Curr|Curr|InCurrent|Curr IIN|AC In Curr)$`)
+
+var eMetrics = []string{
+	"average_ambient_temperature",
+	"average_fan_speed",
+	"average_temperature",
+	"max_fan_speed",
+	"max_temperature",
+	"min_ambient_temperature",
+	"min_fan_speed",
+	"min_temperature",
+	"power",
+}
+
+func calculateEnvironmentMetrics(data *matrix.Matrix, logger *logging.Logger, valueKey string, myData *matrix.Matrix, nodeToNumNode map[string]int) ([]*matrix.Matrix, error) {
+	sensorEnvironmentMetricMap := make(map[string]*environmentMetric)
+	excludedSensors := make(map[string][]sensorValue)
+
+	for k, instance := range data.GetInstances() {
+		if !instance.IsExportable() {
+			continue
+		}
+		iKey := instance.GetLabel("node")
+		if iKey == "" {
+			logger.Warn().Str("key", k).Msg("missing node label for instance")
+			continue
+		}
+		sensorName := instance.GetLabel("sensor")
+		if sensorName == "" {
+			logger.Warn().Str("key", k).Msg("missing sensor name for instance")
+			continue
+		}
+		if _, ok := sensorEnvironmentMetricMap[iKey]; !ok {
+			sensorEnvironmentMetricMap[iKey] = &environmentMetric{key: iKey, ambientTemperature: []float64{}, nonAmbientTemperature: []float64{}, fanSpeed: []float64{}}
+		}
+		for mKey, metric := range data.GetMetrics() {
+			if mKey != valueKey {
+				continue
+			}
+			sensorType := instance.GetLabel("type")
+			sensorUnit := instance.GetLabel("unit")
+
+			isAmbientMatch := ambientRegex.MatchString(sensorName)
+			isPowerMatch := powerInRegex.MatchString(sensorName)
+			isVoltageMatch := voltageRegex.MatchString(sensorName)
+			isCurrentMatch := CurrentRegex.MatchString(sensorName)
+
+			logger.Debug().
+				Bool("isAmbientMatch", isAmbientMatch).
+				Bool("isPowerMatch", isPowerMatch).
+				Bool("isVoltageMatch", isVoltageMatch).
+				Bool("isCurrentMatch", isCurrentMatch).
+				Str("sensorType", sensorType).
+				Str("sensorUnit", sensorUnit).
+				Str("sensorName", sensorName).
+				Send()
+
+			if sensorType == "thermal" && isAmbientMatch {
+				if value, ok := metric.GetValueFloat64(instance); ok {
+					sensorEnvironmentMetricMap[iKey].ambientTemperature = append(sensorEnvironmentMetricMap[iKey].ambientTemperature, value)
+				}
+			}
+
+			if sensorType == "thermal" && !isAmbientMatch {
+				// Exclude temperature sensors whose name contains `Margin` or whose value is not positive
+				value, ok := metric.GetValueFloat64(instance)
+				if value > 0 && !strings.Contains(sensorName, "Margin") {
+					if ok {
+						sensorEnvironmentMetricMap[iKey].nonAmbientTemperature = append(sensorEnvironmentMetricMap[iKey].nonAmbientTemperature, value)
+					}
+				} else {
+					excludedSensors[iKey] = append(excludedSensors[iKey], sensorValue{
+						node:  iKey,
+						name:  sensorName,
+						value: value,
+					})
+				}
+			}
+
+			if sensorType == "fan" {
+				if value, ok := metric.GetValueFloat64(instance); ok {
+					sensorEnvironmentMetricMap[iKey].fanSpeed = append(sensorEnvironmentMetricMap[iKey].fanSpeed, value)
+				}
+			}
+
+			if isPowerMatch {
+				if value, ok := metric.GetValueFloat64(instance); ok {
+					if !IsValidUnit(sensorUnit) {
+						logger.Warn().Str("unit", sensorUnit).Float64("value", value).Msg("unknown power unit")
+					} else {
+						if sensorEnvironmentMetricMap[iKey].powerSensor == nil {
+							sensorEnvironmentMetricMap[iKey].powerSensor = make(map[string]*sensorValue)
+						}
+						sensorEnvironmentMetricMap[iKey].powerSensor[k] = &sensorValue{
+							node:  iKey,
+							name:  sensorName,
+							value: value,
+							unit:  sensorUnit,
+						}
+					}
+				}
+			}
+
+			if isVoltageMatch {
+				if value, ok := metric.GetValueFloat64(instance); ok {
+					if sensorEnvironmentMetricMap[iKey].voltageSensor == nil {
+						sensorEnvironmentMetricMap[iKey].voltageSensor = make(map[string]*sensorValue)
+					}
+					sensorEnvironmentMetricMap[iKey].voltageSensor[k] = &sensorValue{
+						node:  iKey,
+						name:  sensorName,
+						value: value,
+						unit:  sensorUnit,
+					}
+				}
+			}
+
+			if isCurrentMatch {
+				if value, ok := metric.GetValueFloat64(instance); ok {
+					if sensorEnvironmentMetricMap[iKey].currentSensor == nil {
+						sensorEnvironmentMetricMap[iKey].currentSensor = make(map[string]*sensorValue)
+					}
+					sensorEnvironmentMetricMap[iKey].currentSensor[k] = &sensorValue{
+						node:  iKey,
+						name:  sensorName,
+						value: value,
+						unit:  sensorUnit,
+					}
+				}
+			}
+		}
+	}
+
+	if len(excludedSensors) > 0 {
+		var excludedSensorStr string
+		for k, v := range excludedSensors {
+			excludedSensorStr += " node:" + k + " sensor:" + fmt.Sprintf("%v", v)
+		}
+		logger.Logger.Info().Str("sensor", excludedSensorStr).
+			Msg("sensor excluded")
+	}
+
+	whrSensors := make(map[string]*sensorValue)
+
+	for key, v := range sensorEnvironmentMetricMap {
+		instance, err2 := myData.NewInstance(key)
+		if err2 != nil {
+			logger.Logger.Warn().Str("key", key).Msg("instance not found")
+			continue
+		}
+		// set node label
+		instance.SetLabel("node", key)
+		for _, k := range eMetrics {
+			m := myData.GetMetric(k)
+			switch k {
+			case "power":
+				var sumPower float64
+				if len(v.powerSensor) > 0 {
+					for _, v1 := range v.powerSensor {
+						if v1.unit == "mW" || v1.unit == "mW*hr" {
+							sumPower += v1.value / 1000
+						} else if v1.unit == "W" || v1.unit == "W*hr" {
+							sumPower += v1.value
+						} else {
+							logger.Logger.Warn().Str("node", key).Str("name", v1.name).Str("unit", v1.unit).Float64("value", v1.value).Msg("unknown power unit")
+						}
+						if v1.unit == "mW*hr" || v1.unit == "W*hr" {
+							whrSensors[v1.name] = v1
+						}
+					}
+				} else if len(v.voltageSensor) > 0 && len(v.voltageSensor) == len(v.currentSensor) {
+					// sort voltage keys
+					voltageKeys := make([]string, 0, len(v.voltageSensor))
+					for k := range v.voltageSensor {
+						voltageKeys = append(voltageKeys, k)
+					}
+					sort.Strings(voltageKeys)
+
+					// sort current keys
+					currentKeys := make([]string, 0, len(v.currentSensor))
+					for k := range v.currentSensor {
+						currentKeys = append(currentKeys, k)
+					}
+					sort.Strings(currentKeys)
+
+					for i := range currentKeys {
+						currentKey := currentKeys[i]
+						voltageKey := voltageKeys[i]
+
+						// get values
+						currentSensorValue := v.currentSensor[currentKey]
+						voltageSensorValue := v.voltageSensor[voltageKey]
+
+						// convert units
+						if currentSensorValue.unit == "mA" {
+							currentSensorValue.value = currentSensorValue.value / 1000
+						} else if currentSensorValue.unit != "A" {
+							logger.Logger.Warn().Str("node", key).Str("unit", currentSensorValue.unit).Float64("value", currentSensorValue.value).Msg("unknown current unit")
+						}
+
+						if voltageSensorValue.unit == "mV" {
+							voltageSensorValue.value = voltageSensorValue.value / 1000
+						} else if voltageSensorValue.unit != "V" {
+							logger.Logger.Warn().Str("node", key).Str("unit", voltageSensorValue.unit).Float64("value", voltageSensorValue.value).Msg("unknown voltage unit")
+						}
+
+						p := currentSensorValue.value * voltageSensorValue.value
+
+						if !strings.EqualFold(voltageSensorValue.name, "in") && !strings.EqualFold(currentSensorValue.name, "in") {
+							p = p / 0.93 // If the sensor names do NOT contain "IN" or "in", we need to adjust the power to account for loss in the power supply. We use 0.93 as the power supply efficiency factor for all systems.
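+							// Hypothetical worked example (assumed readings, not from the source):
+							// a 12 V / 31 A sensor pair on non-"IN" names contributes
+							// 12 * 31 / 0.93 = 400 W to sumPower.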
+						}
+
+						sumPower += p
+					}
+				} else {
+					logger.Logger.Warn().Str("node", key).Int("current size", len(v.currentSensor)).Int("voltage size", len(v.voltageSensor)).Msg("current and voltage sensor are ignored")
+				}
+
+				numNode, ok := nodeToNumNode[key]
+				if !ok {
+					logger.Logger.Warn().Str("node", key).Msg("node not found in nodeToNumNode map")
+					numNode = 1
+				}
+				sumPower = sumPower / float64(numNode)
+				err2 = m.SetValueFloat64(instance, sumPower)
+				if err2 != nil {
+					logger.Logger.Error().Float64("power", sumPower).Err(err2).Msg("Unable to set power")
+				}
+			case "average_ambient_temperature":
+				if len(v.ambientTemperature) > 0 {
+					aaT := util.Avg(v.ambientTemperature)
+					err2 = m.SetValueFloat64(instance, aaT)
+					if err2 != nil {
+						logger.Logger.Error().Float64("average_ambient_temperature", aaT).Err(err2).Msg("Unable to set average_ambient_temperature")
+					}
+				}
+			case "min_ambient_temperature":
+				maT := util.Min(v.ambientTemperature)
+				err2 = m.SetValueFloat64(instance, maT)
+				if err2 != nil {
+					logger.Logger.Error().Float64("min_ambient_temperature", maT).Err(err2).Msg("Unable to set min_ambient_temperature")
+				}
+			case "max_temperature":
+				mT := util.Max(v.nonAmbientTemperature)
+				err2 = m.SetValueFloat64(instance, mT)
+				if err2 != nil {
+					logger.Logger.Error().Float64("max_temperature", mT).Err(err2).Msg("Unable to set max_temperature")
+				}
+			case "average_temperature":
+				if len(v.nonAmbientTemperature) > 0 {
+					nat := util.Avg(v.nonAmbientTemperature)
+					err2 = m.SetValueFloat64(instance, nat)
+					if err2 != nil {
+						logger.Logger.Error().Float64("average_temperature", nat).Err(err2).Msg("Unable to set average_temperature")
+					}
+				}
+			case "min_temperature":
+				mT := util.Min(v.nonAmbientTemperature)
+				err2 = m.SetValueFloat64(instance, mT)
+				if err2 != nil {
+					logger.Logger.Error().Float64("min_temperature", mT).Err(err2).Msg("Unable to set min_temperature")
+				}
+			case "average_fan_speed":
+				if len(v.fanSpeed) > 0 {
+					afs := util.Avg(v.fanSpeed)
+					err2 = m.SetValueFloat64(instance, afs)
+					if err2 != nil {
+						logger.Logger.Error().Float64("average_fan_speed", afs).Err(err2).Msg("Unable to set average_fan_speed")
+					}
+				}
+			case "max_fan_speed":
+				mfs := util.Max(v.fanSpeed)
+				err2 = m.SetValueFloat64(instance, mfs)
+				if err2 != nil {
+					logger.Logger.Error().Float64("max_fan_speed", mfs).Err(err2).Msg("Unable to set max_fan_speed")
+				}
+			case "min_fan_speed":
+				mfs := util.Min(v.fanSpeed)
+				err2 = m.SetValueFloat64(instance, mfs)
+				if err2 != nil {
+					logger.Logger.Error().Float64("min_fan_speed", mfs).Err(err2).Msg("Unable to set min_fan_speed")
+				}
+			}
+		}
+	}
+
+	if len(whrSensors) > 0 {
+		var whrSensorsStr string
+		for _, v := range whrSensors {
+			whrSensorsStr += " sensor:" + fmt.Sprintf("%v", *v)
+		}
+		logger.Logger.Info().Str("sensor", whrSensorsStr).
+			Msg("sensor with *hr units")
+	}
+
+	return []*matrix.Matrix{myData}, nil
+}
+
+func NewSensor(p *plugin.AbstractPlugin) plugin.Plugin {
+	return &Sensor{AbstractPlugin: p}
+}
+
+type Sensor struct {
+	*plugin.AbstractPlugin
+	data           *matrix.Matrix
+	client         *rest.Client
+	instanceKeys   map[string]string
+	instanceLabels map[string]map[string]string
+}
+
+func (my *Sensor) Init() error {
+
+	var err error
+	if err = my.InitAbc(); err != nil {
+		return err
+	}
+
+	timeout, _ := time.ParseDuration(rest.DefaultTimeout)
+	if my.client, err = rest.New(conf.ZapiPoller(my.ParentParams), timeout, my.Auth); err != nil {
+		my.Logger.Error().Err(err).Msg("connecting")
+		return err
+	}
+
+	if err = my.client.Init(5); err != nil {
+		return err
+	}
+
+	my.data = matrix.New(my.Parent+".Sensor", "environment_sensor", "environment_sensor")
+	my.instanceKeys = make(map[string]string)
+	my.instanceLabels = make(map[string]map[string]string)
+
+	// init environment metrics in plugin matrix
+	// create environment metrics if they don't exist
+	for _, k := range eMetrics {
+		err := matrix.CreateMetric(k, my.data)
+		if err != nil {
+			my.Logger.Warn().Err(err).Str("key", k).Msg("error while creating metric")
+		}
+	}
+	return nil
+}
+
+func (my *Sensor) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) {
+	data := dataMap[my.Object]
+	// Purge and reset data
+	my.data.PurgeInstances()
+	my.data.Reset()
+
+	// Set all global labels if they don't already exist
+	my.data.SetGlobalLabels(data.GetGlobalLabels())
+
+	// Collect chassis fru show, so we can determine if a controller's PSUs are shared or not
+	nodeToNumNode, err := collectChassisFRU(my.client, my.Logger)
+	if err != nil {
+		return nil, err
+	}
+	if len(nodeToNumNode) == 0 {
+		my.Logger.Debug().Msg("No chassis field replaceable units found")
+	}
+
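+	// The same plugin serves both collectors, but each publishes the sensor
+	// reading under a different metric key: ZAPI uses
+	// environment-sensors-info.threshold-sensor-value, REST uses value
+	// (zapiValueKey and restValueKey above).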
+	valueKey := zapiValueKey
+	if my.Parent == "Rest" {
+		valueKey = restValueKey
+	}
+	return calculateEnvironmentMetrics(data, my.Logger, valueKey, my.data, nodeToNumNode)
+}
diff --git a/cmd/collectors/rest/plugins/netroute/netroute.go b/cmd/collectors/rest/plugins/netroute/netroute.go
index 791f601b4..ca825b5df 100644
--- a/cmd/collectors/rest/plugins/netroute/netroute.go
+++ b/cmd/collectors/rest/plugins/netroute/netroute.go
@@ -8,6 +8,7 @@ import (
 	"github.com/netapp/harvest/v2/cmd/poller/plugin"
 	"github.com/netapp/harvest/v2/pkg/matrix"
 	"github.com/netapp/harvest/v2/pkg/tree/node"
+	"github.com/tidwall/gjson"
 	"strconv"
 	"strings"
 )
@@ -67,31 +68,31 @@ func (n *NetRoute) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, err

 	count := 0
 	for key, instance := range data.GetInstances() {
-		cluster := data.GetGlobalLabels().Get("cluster")
+		cluster := data.GetGlobalLabels()["cluster"]
 		routeID := instance.GetLabel("uuid")
-		interfaceName := instance.GetLabel("interface_name")
-		interfaceAddress := instance.GetLabel("interface_address")
-		if interfaceName != "" && interfaceAddress != "" {
-			names := strings.Split(interfaceName, ",")
-			address := strings.Split(interfaceAddress, ",")
-			if len(names) == len(address) {
-				for i, name := range names {
-					index := strings.Join([]string{cluster, strconv.Itoa(count)}, "_")
-					interfaceInstance, err := n.data.NewInstance(index)
-					if err != nil {
-						n.Logger.Error().Err(err).Str("add instance failed for instance key", key).Send()
-						return nil, err
-					}
-
-					for _, l := range instanceLabels {
-						interfaceInstance.SetLabel(l, instance.GetLabel(l))
-					}
-					interfaceInstance.SetLabel("index", index)
-					interfaceInstance.SetLabel("address", address[i])
-					interfaceInstance.SetLabel("name", name)
-					interfaceInstance.SetLabel("route_uuid", routeID)
-					count++
+		interfaces := instance.GetLabel("interfaces")
+
+		interfacesList := gjson.Result{Type: gjson.JSON, Raw: interfaces}
+		names := interfacesList.Get("name").Array()
+		address := interfacesList.Get("address").Array()
+
+		if len(names) == len(address) {
+			for i, name := range names {
+				index := strings.Join([]string{cluster, strconv.Itoa(count)}, "_")
+				interfaceInstance, err := n.data.NewInstance(index)
+				if err != nil {
+					n.Logger.Error().Err(err).Str("add instance failed for instance key", key).Send()
+					return nil, err
 				}
+
+				for _, l := range instanceLabels {
+					interfaceInstance.SetLabel(l, instance.GetLabel(l))
+				}
+				interfaceInstance.SetLabel("index", index)
+				interfaceInstance.SetLabel("address", address[i].String())
+				interfaceInstance.SetLabel("name", name.String())
+				interfaceInstance.SetLabel("route_uuid", routeID)
+				count++
 			}
 		}
 	}
diff --git a/cmd/collectors/rest/plugins/qtree/qtree.go b/cmd/collectors/rest/plugins/qtree/qtree.go
index 3c5b3cfd5..cf780cbeb 100644
--- a/cmd/collectors/rest/plugins/qtree/qtree.go
+++ b/cmd/collectors/rest/plugins/qtree/qtree.go
@@ -9,7 +9,6 @@ import (
 	"github.com/netapp/harvest/v2/cmd/poller/plugin"
 	"github.com/netapp/harvest/v2/cmd/tools/rest"
 	"github.com/netapp/harvest/v2/pkg/conf"
-	"github.com/netapp/harvest/v2/pkg/dict"
 	"github.com/netapp/harvest/v2/pkg/errs"
 	"github.com/netapp/harvest/v2/pkg/matrix"
 	"github.com/netapp/harvest/v2/pkg/tree/node"
@@ -24,7 +23,7 @@ type Qtree struct {
 	*plugin.AbstractPlugin
 	data           *matrix.Matrix
 	instanceKeys   map[string]string
-	instanceLabels map[string]*dict.Dict
+	instanceLabels map[string]map[string]string
 	client         *rest.Client
 	query          string
 	quotaType      []string
@@ -44,13 +43,13 @@ func (q *Qtree) Init() error {
 		"space.used.hard_limit_percent => disk_used_pct_disk_limit",
 		"space.used.soft_limit_percent => disk_used_pct_soft_disk_limit",
 		"space.soft_limit => soft_disk_limit",
-		//"disk-used-pct-threshold" # deprecated and workaround to use same as disk_used_pct_soft_disk_limit
+		// "disk-used-pct-threshold" # deprecated and workaround to use same as disk_used_pct_soft_disk_limit
 		"files.hard_limit => file_limit",
 		"files.used.total => files_used",
 		"files.used.hard_limit_percent => files_used_pct_file_limit",
 		"files.used.soft_limit_percent => files_used_pct_soft_file_limit",
 		"files.soft_limit => soft_file_limit",
-		//"threshold",   # deprecated
+		// "threshold",   # deprecated
 	}

 	if err = q.InitAbc(); err != nil {
@@ -78,7 +77,7 @@ func (q *Qtree) Init() error {

 	q.data = matrix.New(q.Parent+".Qtree", "quota", "quota")
 	q.instanceKeys = make(map[string]string)
-	q.instanceLabels = make(map[string]*dict.Dict)
+	q.instanceLabels = make(map[string]map[string]string)
 	q.historicalLabels = false

 	if q.Params.HasChildS("historicalLabels") {
@@ -87,7 +86,7 @@ func (q *Qtree) Init() error {

 	// apply all instance keys, instance labels from parent (qtree.yaml) to all quota metrics
 	if exportOption := q.ParentParams.GetChildS("export_options"); exportOption != nil {
-		//parent instancekeys would be added in plugin metrics
+		// parent instancekeys would be added in plugin metrics
 		if parentKeys := exportOption.GetChildS("instance_keys"); parentKeys != nil {
 			for _, parentKey := range parentKeys.GetAllChildContentS() {
 				instanceKeys.NewChildS("", parentKey)
@@ -167,7 +166,7 @@ func (q *Qtree) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error)
 	}

 	quotaCount := 0
-	cluster, _ := data.GetGlobalLabels().GetHas("cluster")
+	cluster := data.GetGlobalLabels()["cluster"]

 	if q.historicalLabels {
 		// In 22.05, populate metrics with qtree prefix and old labels
@@ -251,7 +250,7 @@ func (q *Qtree) handlingHistoricalMetrics(result []gjson.Result, data *matrix.Ma
 		}
 	}

-	//set labels
+	// set labels
 	quotaInstance.SetLabel("type", quotaType)
 	quotaInstance.SetLabel("qtree", tree)
 	quotaInstance.SetLabel("volume", volume)
@@ -320,7 +319,7 @@ func (q *Qtree) handlingQuotaMetrics(result []gjson.Result, cluster string, quot
 			q.Logger.Debug().Msgf("add (%s) instance: %v", attribute, err)
 			return err
 		}
-		//set labels
+		// set labels
 		quotaInstance.SetLabel("type", quotaType)
 		quotaInstance.SetLabel("qtree", tree)
 		quotaInstance.SetLabel("volume", volume)
diff --git a/cmd/collectors/rest/plugins/securityaccount/securityaccount.go b/cmd/collectors/rest/plugins/securityaccount/securityaccount.go
index 96c7f9909..6ab0797a7 100644
--- a/cmd/collectors/rest/plugins/securityaccount/securityaccount.go
+++ b/cmd/collectors/rest/plugins/securityaccount/securityaccount.go
@@ -103,7 +103,7 @@ func (s *SecurityAccount) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matr
 			return nil, err
 		}

-		for k, v := range securityAccountInstance.GetLabels().Map() {
+		for k, v := range securityAccountInstance.GetLabels() {
 			securityAccountNewInstance.SetLabel(k, v)
 		}
 		securityAccountNewInstance.SetLabel("applications", application)
diff --git a/cmd/collectors/rest/plugins/sensor/sensor.go b/cmd/collectors/rest/plugins/sensor/sensor.go
deleted file mode 100644
index fa0b241e5..000000000
--- a/cmd/collectors/rest/plugins/sensor/sensor.go
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Copyright NetApp Inc, 2021 All rights reserved
- */
-
-package sensor
-
-import (
-	"fmt"
-	"github.com/netapp/harvest/v2/cmd/collectors"
-	"github.com/netapp/harvest/v2/cmd/poller/plugin"
-	"github.com/netapp/harvest/v2/pkg/dict"
-	"github.com/netapp/harvest/v2/pkg/matrix"
-	"github.com/netapp/harvest/v2/pkg/util"
-	"regexp"
-	"sort"
-	"strings"
-)
-
-type Sensor struct {
-	*plugin.AbstractPlugin
-	data           *matrix.Matrix
-	instanceKeys   map[string]string
-	instanceLabels map[string]*dict.Dict
-}
-
-type sensorEnvironmentMetric struct {
-	key                   string
-	ambientTemperature    []float64
-	nonAmbientTemperature []float64
-	fanSpeed              []float64
-	powerSensor           map[string]*sensorValue
-	voltageSensor         map[string]*sensorValue
-	currentSensor         map[string]*sensorValue
-}
-
-type sensorValue struct {
-	name  string
-	value float64
-	unit  string
-}
-
-func New(p *plugin.AbstractPlugin) plugin.Plugin {
-	return &Sensor{AbstractPlugin: p}
-}
-
-var ambientRegex = regexp.MustCompile(`^(Ambient Temp|Ambient Temp \d|PSU\d AmbTemp|PSU\d Inlet|PSU\d Inlet Temp|In Flow Temp|Front Temp|Bat_Ambient \d|Riser Inlet Temp)$`)
-var powerInRegex = regexp.MustCompile(`^PSU\d (InPwr Monitor|InPower|PIN|Power In)$`)
-var voltageRegex = regexp.MustCompile(`^PSU\d (\d+V|InVoltage|VIN|AC In Volt)$`)
-var currentRegex = regexp.MustCompile(`^PSU\d (\d+V Curr|Curr|InCurrent|Curr IIN|AC In Curr)$`)
-var eMetrics = []string{
-	"average_ambient_temperature",
-	"average_fan_speed",
-	"average_temperature",
-	"max_fan_speed",
-	"max_temperature",
-	"min_ambient_temperature",
-	"min_fan_speed",
-	"min_temperature",
-	"power",
-}
-
-func (my *Sensor) Init() error {
-	if err := my.InitAbc(); err != nil {
-		return err
-	}
-
-	my.data = matrix.New(my.Parent+".Sensor", "environment_sensor", "environment_sensor")
-	my.instanceKeys = make(map[string]string)
-	my.instanceLabels = make(map[string]*dict.Dict)
-
-	// init environment metrics in plugin matrix
-	// create environment metric if not exists
-	for _, k := range eMetrics {
-		err := matrix.CreateMetric(k, my.data)
-		if err != nil {
-			my.Logger.Warn().Err(err).Str("key", k).Msg("error while creating metric")
-		}
-	}
-	return nil
-}
-
-func (my *Sensor) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) {
-	data := dataMap[my.Object]
-	// Purge and reset data
-	my.data.PurgeInstances()
-	my.data.Reset()
-
-	// Set all global labels from zapi.go if already not exist
-	my.data.SetGlobalLabels(data.GetGlobalLabels())
-
-	return my.calculateEnvironmentMetrics(data)
-}
-
-func (my *Sensor) calculateEnvironmentMetrics(data *matrix.Matrix) ([]*matrix.Matrix, error) {
-	sensorEnvironmentMetricMap := make(map[string]*sensorEnvironmentMetric)
-	excludedSensors := make(map[string][]sensorValue)
-
-	for k, instance := range data.GetInstances() {
-		iKey := instance.GetLabel("node")
-		if iKey == "" {
-			my.Logger.Warn().Str("key", k).Msg("missing node for instance")
-			continue
-		}
-		sensorName := instance.GetLabel("sensor")
-		if sensorName == "" {
-			my.Logger.Warn().Str("key", k).Msg("missing sensor name for instance")
-			continue
-		}
-		if _, ok := sensorEnvironmentMetricMap[iKey]; !ok {
-			sensorEnvironmentMetricMap[iKey] = &sensorEnvironmentMetric{key: iKey, ambientTemperature: []float64{}, nonAmbientTemperature: []float64{}, fanSpeed: []float64{}}
-		}
-		for mKey, metric := range data.GetMetrics() {
-			if mKey == "value" {
-				sensorType := instance.GetLabel("type")
-				sensorUnit := instance.GetLabel("unit")
-
-				isAmbientMatch := ambientRegex.MatchString(sensorName)
-				isPowerMatch := powerInRegex.MatchString(sensorName)
-				isVoltageMatch := voltageRegex.MatchString(sensorName)
-				isCurrentMatch := currentRegex.MatchString(sensorName)
-
-				my.Logger.Debug().Bool("isAmbientMatch", isAmbientMatch).
-					Bool("isPowerMatch", isPowerMatch).
-					Bool("isVoltageMatch", isVoltageMatch).
-					Bool("isCurrentMatch", isCurrentMatch).
-					Str("sensorType", sensorType).
-					Str("sensorUnit", sensorUnit).
-					Str("sensorName", sensorName).
-					Msg("")
-
-				if sensorType == "thermal" && isAmbientMatch {
-					if value, ok := metric.GetValueFloat64(instance); ok {
-						sensorEnvironmentMetricMap[iKey].ambientTemperature = append(sensorEnvironmentMetricMap[iKey].ambientTemperature, value)
-					}
-				}
-
-				if sensorType == "thermal" && !isAmbientMatch {
-					// Exclude temperature sensors that contains sensor name `Margin` and value < 0
-					value, ok := metric.GetValueFloat64(instance)
-					if value > 0 && !strings.Contains(sensorName, "Margin") {
-						if ok {
-							sensorEnvironmentMetricMap[iKey].nonAmbientTemperature = append(sensorEnvironmentMetricMap[iKey].nonAmbientTemperature, value)
-						}
-					} else {
-						excludedSensors[iKey] = append(excludedSensors[iKey], sensorValue{
-							name:  sensorName,
-							value: value,
-						})
-					}
-				}
-
-				if sensorType == "fan" {
-					if value, ok := metric.GetValueFloat64(instance); ok {
-						sensorEnvironmentMetricMap[iKey].fanSpeed = append(sensorEnvironmentMetricMap[iKey].fanSpeed, value)
-					}
-				}
-
-				if isPowerMatch {
-					if value, ok := metric.GetValueFloat64(instance); ok {
-						if !collectors.IsValidUnit(sensorUnit) {
-							my.Logger.Warn().Str("unit", sensorUnit).Float64("value", value).Msg("unknown power unit")
-						} else {
-							if sensorEnvironmentMetricMap[iKey].powerSensor == nil {
-								sensorEnvironmentMetricMap[iKey].powerSensor = make(map[string]*sensorValue)
-							}
-							sensorEnvironmentMetricMap[iKey].powerSensor[k] = &sensorValue{name: sensorName, value: value, unit: sensorUnit}
-						}
-					}
-				}
-
-				if isVoltageMatch {
-					if value, ok := metric.GetValueFloat64(instance); ok {
-						if sensorEnvironmentMetricMap[iKey].voltageSensor == nil {
-							sensorEnvironmentMetricMap[iKey].voltageSensor = make(map[string]*sensorValue)
-						}
-						sensorEnvironmentMetricMap[iKey].voltageSensor[k] = &sensorValue{name: sensorName, value: value, unit: sensorUnit}
-					}
-				}
-
-				if isCurrentMatch {
-					if value, ok := metric.GetValueFloat64(instance); ok {
-						if sensorEnvironmentMetricMap[iKey].currentSensor == nil {
-							sensorEnvironmentMetricMap[iKey].currentSensor = make(map[string]*sensorValue)
-						}
-						sensorEnvironmentMetricMap[iKey].currentSensor[k] = &sensorValue{name: sensorName, value: value, unit: sensorUnit}
-					}
-				}
-			}
-		}
-	}
-
-	if len(excludedSensors) > 0 {
-		var excludedSensorStr string
-		for k, v := range excludedSensors {
-			excludedSensorStr += " node:" + k + " sensor:" + fmt.Sprintf("%v", v)
-		}
-		my.Logger.Info().Str("sensor", excludedSensorStr).
-			Msg("sensor excluded")
-	}
-
-	whrSensors := make(map[string]*sensorValue)
-
-	for key, v := range sensorEnvironmentMetricMap {
-		instance, err := my.data.NewInstance(key)
-		if err != nil {
-			my.Logger.Warn().Str("key", key).Msg("instance not found")
-			continue
-		}
-		// set node label
-		instance.SetLabel("node", key)
-		for _, k := range eMetrics {
-			m := my.data.GetMetric(k)
-			switch k {
-			case "power":
-				var sumPower float64
-				if len(v.powerSensor) > 0 {
-					for _, v1 := range v.powerSensor {
-						if v1.unit == "mW" || v1.unit == "mW*hr" {
-							sumPower += v1.value / 1000
-						} else if v1.unit == "W" || v1.unit == "W*hr" {
-							sumPower += v1.value
-						} else {
-							my.Logger.Warn().Str("unit", v1.unit).Float64("value", v1.value).Msg("unknown power unit")
-						}
-						if v1.unit == "mW*hr" || v1.unit == "W*hr" {
-							whrSensors[v1.name] = v1
-						}
-					}
-				} else if len(v.voltageSensor) > 0 && len(v.voltageSensor) == len(v.currentSensor) {
-					// sort voltage keys
-					voltageKeys := make([]string, 0, len(v.voltageSensor))
-					for k := range v.voltageSensor {
-						voltageKeys = append(voltageKeys, k)
-					}
-					sort.Strings(voltageKeys)
-
-					// sort current keys
-					currentKeys := make([]string, 0, len(v.currentSensor))
-					for k := range v.currentSensor {
-						currentKeys = append(currentKeys, k)
-					}
-					sort.Strings(currentKeys)
-
-					for i := range currentKeys {
-						currentKey := currentKeys[i]
-						voltageKey := voltageKeys[i]
-
-						// get values
-						currentSensorValue := v.currentSensor[currentKey]
-						voltageSensorValue := v.voltageSensor[voltageKey]
-
-						// convert units
-						if currentSensorValue.unit == "mA" {
-							currentSensorValue.value = currentSensorValue.value / 1000
-						} else if currentSensorValue.unit != "A" {
-							my.Logger.Warn().Str("unit", currentSensorValue.unit).Float64("value", currentSensorValue.value).Msg("unknown current unit")
-						}
-
-						if voltageSensorValue.unit == "mV" {
-							voltageSensorValue.value = voltageSensorValue.value / 1000
-						} else if voltageSensorValue.unit != "V" {
-							my.Logger.Warn().Str("unit", voltageSensorValue.unit).Float64("value", voltageSensorValue.value).Msg("unknown voltage unit")
-						}
-
-						p := currentSensorValue.value * voltageSensorValue.value
-
-						if !strings.EqualFold(voltageSensorValue.name, "in") && !strings.EqualFold(currentSensorValue.name, "in") {
-							p = p / 0.93 // If the sensor names to do NOT contain "IN" or "in", then we need to adjust the power to account for loss in the power supply. We will use 0.93 as the power supply efficiency factor for all systems.
-						}
-
-						sumPower += p
-					}
-				} else {
-					my.Logger.Warn().Int("current size", len(v.currentSensor)).Int("voltage size", len(v.voltageSensor)).Msg("current and voltage sensor are ignored")
-				}
-
-				err = m.SetValueFloat64(instance, sumPower)
-				if err != nil {
-					my.Logger.Error().Float64("power", sumPower).Err(err).Msg("Unable to set power")
-				}
-			case "average_ambient_temperature":
-				if len(v.ambientTemperature) > 0 {
-					aaT := util.Avg(v.ambientTemperature)
-					err = m.SetValueFloat64(instance, aaT)
-					if err != nil {
-						my.Logger.Error().Float64("average_ambient_temperature", aaT).Err(err).Msg("Unable to set average_ambient_temperature")
-					}
-				}
-			case "min_ambient_temperature":
-				maT := util.Min(v.ambientTemperature)
-				err = m.SetValueFloat64(instance, maT)
-				if err != nil {
-					my.Logger.Error().Float64("min_ambient_temperature", maT).Err(err).Msg("Unable to set min_ambient_temperature")
-				}
-			case "max_temperature":
-				mT := util.Max(v.nonAmbientTemperature)
-				err = m.SetValueFloat64(instance, mT)
-				if err != nil {
-					my.Logger.Error().Float64("max_temperature", mT).Err(err).Msg("Unable to set max_temperature")
-				}
-			case "average_temperature":
-				if len(v.nonAmbientTemperature) > 0 {
-					nat := util.Avg(v.nonAmbientTemperature)
-					err = m.SetValueFloat64(instance, nat)
-					if err != nil {
-						my.Logger.Error().Float64("average_temperature", nat).Err(err).Msg("Unable to set average_temperature")
-					}
-				}
-			case "min_temperature":
-				mT := util.Min(v.nonAmbientTemperature)
-				err = m.SetValueFloat64(instance, mT)
-				if err != nil {
-					my.Logger.Error().Float64("min_temperature", mT).Err(err).Msg("Unable to set min_temperature")
-				}
-			case "average_fan_speed":
-				if len(v.fanSpeed) > 0 {
-					afs := util.Avg(v.fanSpeed)
-					err = m.SetValueFloat64(instance, afs)
-					if err != nil {
-						my.Logger.Error().Float64("average_fan_speed", afs).Err(err).Msg("Unable to set average_fan_speed")
-					}
-				}
-			case "max_fan_speed":
-				mfs := util.Max(v.fanSpeed)
-				err = m.SetValueFloat64(instance, mfs)
-				if err != nil {
-					my.Logger.Error().Float64("max_fan_speed", mfs).Err(err).Msg("Unable to set max_fan_speed")
-				}
-			case "min_fan_speed":
-				mfs := util.Min(v.fanSpeed)
-				err = m.SetValueFloat64(instance, mfs)
-				if err != nil {
-					my.Logger.Error().Float64("min_fan_speed", mfs).Err(err).Msg("Unable to set min_fan_speed")
-				}
-			}
-		}
-	}
-
-	if len(whrSensors) > 0 {
-		var whrSensorsStr string
-		for _, v := range whrSensors {
-			whrSensorsStr += " sensor:" + fmt.Sprintf("%v", *v)
-		}
-		my.Logger.Info().Str("sensor", whrSensorsStr).
-			Msg("sensor with *hr units")
-	}
-
-	return []*matrix.Matrix{my.data}, nil
-}
diff --git a/cmd/collectors/rest/plugins/snapmirror/snapmirror.go b/cmd/collectors/rest/plugins/snapmirror/snapmirror.go
index 2e9bd27ce..b6efab821 100644
--- a/cmd/collectors/rest/plugins/snapmirror/snapmirror.go
+++ b/cmd/collectors/rest/plugins/snapmirror/snapmirror.go
@@ -91,7 +91,7 @@ func (my *SnapMirror) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix,
 	if my.currentVal >= PluginInvocationRate {
 		my.currentVal = 0

-		if cluster, ok := data.GetGlobalLabels().GetHas("cluster"); ok {
+		if cluster, ok := data.GetGlobalLabels()["cluster"]; ok {
 			if err := my.getSVMPeerData(cluster); err != nil {
 				return nil, err
 			}
@@ -135,7 +135,7 @@ func (my *SnapMirror) getSVMPeerData(cluster string) error {
 func (my *SnapMirror) updateSMLabels(data *matrix.Matrix) {
 	var keys []string

-	cluster, _ := data.GetGlobalLabels().GetHas("cluster")
+	cluster := data.GetGlobalLabels()["cluster"]
 	lastTransferSizeMetric := data.GetMetric("last_transfer_size")
 	lagTimeMetric := data.GetMetric("lag_time")
@@ -204,7 +204,7 @@ func (my *SnapMirror) handleCGRelationships(data *matrix.Matrix, keys []string)
 			continue
 		}

-		for k, v := range cgInstance.GetLabels().Map() {
+		for k, v := range cgInstance.GetLabels() {
 			cgVolumeInstance.SetLabel(k, v)
 		}
 		cgVolumeInstance.SetLabel("relationship_id", cgVolumeInstanceKey)
diff --git a/cmd/collectors/rest/plugins/volume/volume.go b/cmd/collectors/rest/plugins/volume/volume.go
index 966dc6df1..d57e4c19b 100644
--- a/cmd/collectors/rest/plugins/volume/volume.go
+++ b/cmd/collectors/rest/plugins/volume/volume.go
@@ -165,7 +165,7 @@ func (my *Volume) handleARWProtection(data *matrix.Matrix) {
 		}
 	}

-	arwInstanceKey := data.GetGlobalLabels().Get("cluster") + data.GetGlobalLabels().Get("datacenter")
+	arwInstanceKey := data.GetGlobalLabels()["cluster"] + data.GetGlobalLabels()["datacenter"]
 	if arwInstance, err = my.arw.NewInstance(arwInstanceKey); err != nil {
 		my.Logger.Error().Err(err).Str("arwInstanceKey", arwInstanceKey).Msg("Failed to create arw instance")
 		return
diff --git a/cmd/collectors/rest/plugins/volumeanalytics/volumeanalytics.go b/cmd/collectors/rest/plugins/volumeanalytics/volumeanalytics.go
index 1b24d7312..d89a3e14a 100644
--- a/cmd/collectors/rest/plugins/volumeanalytics/volumeanalytics.go
+++ b/cmd/collectors/rest/plugins/volumeanalytics/volumeanalytics.go
@@ -88,7 +88,7 @@ func (v *VolumeAnalytics) initMatrix() error {
 func (v *VolumeAnalytics) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) {
 	data := dataMap[v.Object]

-	cluster, _ := data.GetGlobalLabels().GetHas("cluster")
+	cluster := data.GetGlobalLabels()["cluster"]
 	clusterVersion := v.client.Cluster().GetVersion()
 	ontapVersion, err := goversion.NewVersion(clusterVersion)
 	if err != nil {
@@ -152,7 +152,7 @@ func (v *VolumeAnalytics) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matr
 			instance.SetLabel("dir_name", name)
 			instance.SetLabel("index", cluster+"_"+strconv.Itoa(index))
 			// copy all labels
-			for k1, v1 := range dataInstance.GetLabels().Map() {
+			for k1, v1 := range dataInstance.GetLabels() {
 				instance.SetLabel(k1, v1)
 			}
 			if bytesUsed != "" {
diff --git a/cmd/collectors/rest/rest.go b/cmd/collectors/rest/rest.go
index e94dcba2f..3dc51a0a2 100644
--- a/cmd/collectors/rest/rest.go
+++ b/cmd/collectors/rest/rest.go
@@ -2,6 +2,7 @@ package rest

 import (
 	"fmt"
+	"github.com/netapp/harvest/v2/cmd/collectors"
 	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/certificate"
 	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/disk"
 	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/health"
@@ -11,7 +12,6 @@ import (
 	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/qospolicyfixed"
 	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/qtree"
 	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/securityaccount"
-	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/sensor"
 	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/shelf"
 	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/snapmirror"
 	"github.com/netapp/harvest/v2/cmd/collectors/rest/plugins/svm"
@@ -403,7 +403,7 @@ func (r *Rest) LoadPlugin(kind string, abc *plugin.AbstractPlugin) plugin.Plugin
 	case "SVM":
 		return svm.New(abc)
 	case "Sensor":
-		return sensor.New(abc)
+		return collectors.NewSensor(abc)
 	case "Shelf":
 		return shelf.New(abc)
 	case "SecurityAccount":
@@ -502,6 +502,7 @@ func (r *Rest) HandleResults(result []gjson.Result, prop *prop, isEndPoint bool)
 							labelString := r.String()
 							labelArray = append(labelArray, labelString)
 						}
+						sort.Strings(labelArray)
 						instance.SetLabel(display, strings.Join(labelArray, ","))
 					} else {
 						instance.SetLabel(display, value.String())
@@ -581,7 +582,7 @@ func (r *Rest) CollectAutoSupport(p *collector.Payload) {
 		exporterTypes = append(exporterTypes, exporter.GetClass())
 	}

-	var counters = make([]string, 0)
+	var counters = make([]string, 0, len(r.Prop.Counters))
 	for k := range r.Prop.Counters {
 		counters = append(counters, k)
 	}
@@ -598,6 +599,16 @@ func (r *Rest) CollectAutoSupport(p *collector.Payload) {
 	}

 	// Add collector information
+	md := r.GetMetadata()
+	info := collector.InstanceInfo{
+		Count:      md.LazyValueInt64("instances", "data"),
+		DataPoints: md.LazyValueInt64("metrics", "data"),
+		PollTime:   md.LazyValueInt64("poll_time", "data"),
+		APITime:    md.LazyValueInt64("api_time", "data"),
+		ParseTime:  md.LazyValueInt64("parse_time", "data"),
+		PluginTime: md.LazyValueInt64("plugin_time", "data"),
+	}
+
 	p.AddCollectorAsup(collector.AsupCollector{
 		Name:  r.Name,
 		Query: r.Prop.Query,
@@ -608,6 +619,7 @@ func (r *Rest) CollectAutoSupport(p *collector.Payload) {
 		},
 		Schedules:     schedules,
 		ClientTimeout: r.Client.Timeout.String(),
+		InstanceInfo:  &info,
 	})

 	if (r.Name == "Rest" && (r.Object == "Volume" || r.Object == "Node")) || r.Name == "Ems" {
@@ -619,16 +631,6 @@ func (r *Rest) CollectAutoSupport(p *collector.Payload) {
 	}
 	p.Target.ClusterUUID = r.Client.Cluster().UUID

-	md := r.GetMetadata()
-	info := collector.InstanceInfo{
-		Count:      md.LazyValueInt64("instances", "data"),
-		DataPoints: md.LazyValueInt64("metrics", "data"),
-		PollTime:   md.LazyValueInt64("poll_time", "data"),
-		APITime:    md.LazyValueInt64("api_time", "data"),
-		ParseTime:  md.LazyValueInt64("parse_time", "data"),
-		PluginTime: md.LazyValueInt64("plugin_time", "data"),
-	}
-
 	if r.Object == "Node" || r.Name == "ems" {
 		var (
 			nodeIds []collector.ID
diff --git a/cmd/collectors/rest/rest_test.go b/cmd/collectors/rest/rest_test.go
index 332c3386f..f35eebbd2 100644
--- a/cmd/collectors/rest/rest_test.go
+++ b/cmd/collectors/rest/rest_test.go
@@ -152,12 +152,11 @@ func volumeEndpoints(e *endPoint) ([]gjson.Result, error) {

 func newRest(object string, path string) *Rest {
 	var err error
-	opts := options.Options{
-		Poller:   pollerName,
-		HomePath: "testdata",
-		IsTest:   true,
-	}
-	ac := collector.New("Rest", object, &opts, params(object, path), nil)
+	opts := options.New(options.WithConfPath("testdata/conf"))
+	opts.Poller = pollerName
+	opts.HomePath = "testdata"
+	opts.IsTest = true
+	ac := collector.New("Rest", object, opts, params(object, path), nil)
 	r := Rest{}
 	err = r.Init(ac)
 	if err != nil {
diff --git a/cmd/collectors/rest/templating.go b/cmd/collectors/rest/templating.go
index e0ba07193..a29dc9242 100644
--- a/cmd/collectors/rest/templating.go
+++ b/cmd/collectors/rest/templating.go
@@ -13,23 +13,13 @@ import (

 func (r *Rest) LoadTemplate() (string, error) {

-	var (
-		template     *node.Node
-		templatePath string
-		err          error
-	)
-
-	// import template
-	if template, templatePath, err = r.ImportSubTemplate(
-		"",
-		TemplateFn(r.Params, r.Object),
-		r.Client.Cluster().Version,
-	); err != nil {
+	template, path, err := r.ImportSubTemplate("", TemplateFn(r.Params, r.Object), r.Client.Cluster().Version)
+	if err != nil {
 		return "", err
 	}

 	r.Params.Union(template)
-	return templatePath, nil
+	return path, nil
 }

 func (r *Rest) InitCache() error {
@@ -94,10 +84,10 @@ func HandleDuration(value string) float64 {

 	seconds := 0.0

-	//years
-	//months
+	// years
+	// months

-	//days
+	// days
 	if matches[3] != "" {
 		f, err := strconv.ParseFloat(matches[3], 64)
 		if err != nil {
@@ -107,7 +97,7 @@ func HandleDuration(value string) float64 {
 		seconds += f * 24 * 60 * 60
 	}

-	//hours
+	// hours
 	if matches[4] != "" {
 		f, err := strconv.ParseFloat(matches[4], 64)
 		if err != nil {
@@ -117,7 +107,7 @@ func HandleDuration(value string) float64 {
 		seconds += f * 60 * 60
 	}

-	//minutes
+	// minutes
 	if matches[5] != "" {
 		f, err := strconv.ParseFloat(matches[5], 64)
 		if err != nil {
@@ -127,7 +117,7 @@ func HandleDuration(value string) float64 {
 		seconds += f * 60
 	}

-	//seconds & milliseconds
+	// seconds & milliseconds
 	if matches[6] != "" {
 		f, err := strconv.ParseFloat(matches[6], 64)
 		if err != nil {
diff --git a/cmd/collectors/restperf/plugins/disk/disk.go b/cmd/collectors/restperf/plugins/disk/disk.go
index 3f1e49b63..6d586df6f 100644
--- a/cmd/collectors/restperf/plugins/disk/disk.go
+++ b/cmd/collectors/restperf/plugins/disk/disk.go
@@ -4,7 +4,6 @@ import (
 	"github.com/netapp/harvest/v2/cmd/poller/plugin"
 	"github.com/netapp/harvest/v2/cmd/tools/rest"
 	"github.com/netapp/harvest/v2/pkg/conf"
-	"github.com/netapp/harvest/v2/pkg/dict"
 	"github.com/netapp/harvest/v2/pkg/errs"
 	"github.com/netapp/harvest/v2/pkg/matrix"
 	"github.com/netapp/harvest/v2/pkg/tree/node"
@@ -44,7 +43,7 @@ type Disk struct {
 	shelfData      map[string]*matrix.Matrix
 	powerData      map[string]*matrix.Matrix
 	instanceKeys   map[string]string
-	instanceLabels map[string]*dict.Dict
+	instanceLabels map[string]map[string]string
 	client         *rest.Client
 	query          string
 	aggrMap        map[string]*aggregate
@@ -65,7 +64,6 @@ type aggregate struct {
 	isShared    bool
 	power       float64
 	derivedType RaidAggrDerivedType
-	export      bool
 }

 type disk struct {
@@ -170,7 +168,7 @@ func (d *Disk) Init() error {
 	d.powerData = make(map[string]*matrix.Matrix)

 	d.instanceKeys = make(map[string]string)
-	d.instanceLabels = make(map[string]*dict.Dict)
+	d.instanceLabels = make(map[string]map[string]string)

 	for attribute, childObj := range shelfMetric {

@@ -181,7 +179,7 @@ func (d *Disk) Init() error {
 			objectName = strings.TrimSpace(x[1])
 		}

-		d.instanceLabels[attribute] = dict.New()
+		d.instanceLabels[attribute] = make(map[string]string)

 		d.shelfData[attribute] = matrix.New(d.Parent+".Shelf", "shelf_"+objectName, "shelf_"+objectName)
 		d.shelfData[attribute].SetGlobalLabel("datacenter", d.ParentParams.GetChildContentS("datacenter"))
@@ -202,11 +200,11 @@ func (d *Disk) Init() error {
 			switch kind {
 			case "key":
 				d.instanceKeys[attribute] = metricName
-				d.instanceLabels[attribute].Set(metricName, display)
+				d.instanceLabels[attribute][metricName] = display
 				instanceKeys.NewChildS("", display)
 				d.Logger.Debug().Msgf("added instance key: (%s) [%s]", attribute, display)
 			case "label":
-				d.instanceLabels[attribute].Set(metricName, display)
+				d.instanceLabels[attribute][metricName] = display
 				instanceLabels.NewChildS("", display)
 				d.Logger.Debug().Msgf("added instance label: (%s) [%s]", attribute, display)
 			case "float":
@@ -306,7 +304,7 @@ func (d *Disk) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error)
 				}

 				d.Logger.Debug().Msgf("add (%s) instance: %s.%s.%s", attribute, shelfSerialNumber, attribute, key)
-				for label, labelDisplay := range d.instanceLabels[attribute].Map() {
+				for label, labelDisplay := range d.instanceLabels[attribute] {
 					if value := obj.Get(label); value.Exists() {
 						if value.IsArray() {
 							var labelArray []string
@@ -332,7 +330,7 @@ func (d *Disk) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error)
 					shelfChildInstance.SetLabel("shelf", shelfName)

 					// Each child would have different possible values which is an ugly way to write all of them,
-					// so normal value would be mapped to 1 and rest all are mapped to 0.
+					// so normal value would be mapped to 1, and the rest all are mapped to 0.
 					if shelfChildInstance.GetLabel("status") == "normal" {
 						_ = statusMetric.SetValueInt64(shelfChildInstance, 1)
 					} else {
@@ -408,7 +406,6 @@ func (d *Disk) calculateAggrPower(data *matrix.Matrix, output []*matrix.Matrix)
 	if totalTransfers == nil {
 		return output, errs.New(errs.ErrNoMetric, "total_transfer_count")
 	}
-	totaliops := make(map[string]float64)

 	// calculate power for returned disks in perf response
 	for _, instance := range data.GetInstances() {
@@ -433,9 +430,7 @@ func (d *Disk) calculateAggrPower(data *matrix.Matrix, output []*matrix.Matrix)
 				sh, ok := d.ShelfMap[shelfID]
 				if ok {
 					diskPower := v * sh.power / sh.iops
-					totaliops[shelfID] = totaliops[shelfID] + v
-					aggrPower := a.power + diskPower
-					a.power = aggrPower
+					a.power += diskPower
 				}
 			} else {
 				d.Logger.Warn().Str("diskUUID", diskUUID).
@@ -485,25 +480,24 @@ func (d *Disk) calculateAggrPower(data *matrix.Matrix, output []*matrix.Matrix)

 	// fill aggr power matrix with power calculated above
 	for k, v := range d.aggrMap {
-		if v.export {
-			instanceKey := k
-			instance, err := aggrData.NewInstance(instanceKey)
-			if err != nil {
-				d.Logger.Error().Err(err).Str("key", instanceKey).Msg("Failed to add instance")
-				continue
-			}
-			instance.SetLabel("aggr", k)
-			instance.SetLabel("derivedType", string(v.derivedType))
-			instance.SetLabel("node", v.node)
-
-			m := aggrData.GetMetric("power")
-			err = m.SetValueFloat64(instance, v.power)
-			if err != nil {
-				d.Logger.Error().Err(err).Str("key", instanceKey).Msg("Failed to set value")
-				continue
-			}
+		instanceKey := k
+		instance, err := aggrData.NewInstance(instanceKey)
+		if err != nil {
+			d.Logger.Error().Err(err).Str("key", instanceKey).Msg("Failed to add instance")
+			continue
+		}
+		instance.SetLabel("aggr", k)
+		instance.SetLabel("derivedType", string(v.derivedType))
+		instance.SetLabel("node", v.node)
+
+		m := aggrData.GetMetric("power")
+		err = m.SetValueFloat64(instance, v.power)
+		if err != nil {
+			d.Logger.Error().Err(err).Str("key", instanceKey).Msg("Failed to set value")
+			continue
 		}
 	}
+
 	output = append(output, aggrData)

 	return output, nil
@@ -605,7 +599,7 @@ func (d *Disk) getAggregates() error {

 	query := "api/private/cli/aggr"

-	href := rest.BuildHref("", "aggregate,composite,node,uses_shared_disks,root,storage_type", nil, "", "", "", "", query)
+	href := rest.BuildHref("", "aggregate,composite,node,uses_shared_disks,storage_type", nil, "", "", "", "", query)

 	records, err := rest.Fetch(d.client, href)
 	if err != nil {
@@ -625,11 +619,9 @@ func (d *Disk) getAggregates() error {
 		aggrName := aggr.Get("aggregate").String()
 		usesSharedDisks := aggr.Get("uses_shared_disks").String()
 		isC := aggr.Get("composite").String()
-		isR := aggr.Get("root").String()
 		aggregateType := aggr.Get("storage_type").String()
 		nodeName := aggr.Get("node").String()
 		isShared := usesSharedDisks == "true"
-		isRootAggregate := isR == "true"
 		isComposite := isC == "true"
 		derivedType := getAggregateDerivedType(aggregateType, isComposite, isShared)
 		d.aggrMap[aggrName] = &aggregate{
@@ -637,7 +629,6 @@ func (d *Disk) getAggregates() error {
 			isShared:    isShared,
 			derivedType: derivedType,
 			node:        nodeName,
-			export:      !isRootAggregate,
 		}
 	}
 	return nil
diff --git a/cmd/collectors/restperf/plugins/volume/volume.go b/cmd/collectors/restperf/plugins/volume/volume.go
index 1ab572bc9..247df21db 100644
--- a/cmd/collectors/restperf/plugins/volume/volume.go
+++ b/cmd/collectors/restperf/plugins/volume/volume.go
@@ -4,6 +4,7 @@ import (
 	"github.com/netapp/harvest/v2/cmd/poller/plugin"
 	"github.com/netapp/harvest/v2/pkg/matrix"
 	"github.com/netapp/harvest/v2/pkg/set"
+	"maps"
 	"regexp"
 	"sort"
 	"strings"
@@ -44,6 +45,7 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error

 	re := regexp.MustCompile(`^(.*)__(\d{4})$`)

+	fgAggrMap := make(map[string]*set.Set)
 	flexgroupAggrsMap := make(map[string]*set.Set)
 	// volume_aggr_labels metric is deprecated now and will be removed later.
 	metricName := "labels"
@@ -67,17 +69,17 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error
 			key := i.GetLabel("svm") + "." + match[1]
 			if cache.GetInstance(key) == nil {
 				fg, _ := cache.NewInstance(key)
-				fg.SetLabels(i.GetLabels().Copy())
+				fg.SetLabels(maps.Clone(i.GetLabels()))
 				fg.SetLabel("volume", match[1])
-				// Flexgroup don't show any aggregate, node
-				fg.SetLabel("aggr", "")
+				// Flexgroups don't show a node
 				fg.SetLabel("node", "")
 				fg.SetLabel(style, "flexgroup")
+				fgAggrMap[key] = set.New()
 			}

 			if volumeAggrmetric.GetInstance(key) == nil {
 				flexgroupInstance, _ := volumeAggrmetric.NewInstance(key)
-				flexgroupInstance.SetLabels(i.GetLabels().Copy())
+				flexgroupInstance.SetLabels(maps.Clone(i.GetLabels()))
 				flexgroupInstance.SetLabel("volume", match[1])
 				// Flexgroup don't show any node
 				flexgroupInstance.SetLabel("node", "")
@@ -87,6 +89,7 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error
 					v.Logger.Error().Err(err).Str("metric", metricName).Msg("Unable to set value on metric")
 				}
 			}
+			fgAggrMap[key].Add(i.GetLabel("aggr"))
 			flexgroupAggrsMap[key].Add(i.GetLabel("aggr"))
 			i.SetLabel(style, "flexgroup_constituent")
 			i.SetExportable(false)
@@ -98,7 +101,7 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error
 				v.Logger.Error().Err(err).Str("key", key).Msg("Failed to create new instance")
 				continue
 			}
-			flexvolInstance.SetLabels(i.GetLabels().Copy())
+			flexvolInstance.SetLabels(maps.Clone(i.GetLabels()))
 			flexvolInstance.SetLabel(style, "flexvol")
 			if err := metric.SetValueFloat64(flexvolInstance, 1); err != nil {
 				v.Logger.Error().Err(err).Str("metric", metricName).Msg("Unable to set value on metric")
@@ -108,7 +111,7 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error

 	v.Logger.Debug().Int("flexgroup volume count", len(cache.GetInstances())).Msg("")

-	//cache.Reset()
+	// cache.Reset()

 	// create summary
 	for _, i := range data.GetInstances() {
@@ -131,6 +134,11 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error
 			continue
 		}

+		// set the aggrs label for the flexgroup; sort so the aggregate order is the same on every poll
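+		// Hypothetical illustration: constituents on aggr_2 and aggr_1 always
+		// produce the label value "aggr_1,aggr_2", regardless of map iteration order.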
+		aggrs := fgAggrMap[key].Values()
+		sort.Strings(aggrs)
+		fg.SetLabel("aggr", strings.Join(aggrs, ","))
+
 		for mkey, m := range data.GetMetrics() {

 			if !m.IsExportable() && m.GetType() != "float64" {
diff --git a/cmd/collectors/restperf/restperf.go b/cmd/collectors/restperf/restperf.go
index 80aada89c..60a05be31 100644
--- a/cmd/collectors/restperf/restperf.go
+++ b/cmd/collectors/restperf/restperf.go
@@ -16,11 +16,13 @@ import (
 	"github.com/netapp/harvest/v2/cmd/poller/plugin"
 	"github.com/netapp/harvest/v2/cmd/tools/rest"
 	"github.com/netapp/harvest/v2/pkg/color"
+	"github.com/netapp/harvest/v2/pkg/dict"
 	"github.com/netapp/harvest/v2/pkg/errs"
 	"github.com/netapp/harvest/v2/pkg/matrix"
 	"github.com/netapp/harvest/v2/pkg/set"
 	"github.com/netapp/harvest/v2/pkg/tree/node"
 	"github.com/netapp/harvest/v2/pkg/util"
+	"github.com/rs/zerolog"
 	"github.com/tidwall/gjson"
 	"path"
 	"strconv"
@@ -267,7 +269,20 @@ func (r *RestPerf) pollCounter(records []gjson.Result) (map[string]*matrix.Matri
 		}

 		name := strings.Clone(c.Get("name").String())
+		dataType := strings.Clone(c.Get("type").String())
+		if p := r.GetOverride(name); p != "" {
+			dataType = p
+		}
+
 		if _, has := r.Prop.Metrics[name]; has {
+			if strings.Contains(dataType, "string") {
+				if _, ok := r.Prop.InstanceLabels[name]; !ok {
+					r.Prop.InstanceLabels[name] = r.Prop.Counters[name]
+				}
+				// remove from metrics
+				delete(r.Prop.Metrics, name)
+				return true
+			}
 			d := strings.Clone(c.Get("denominator.name").String())
 			if d != "" {
 				if _, has := r.Prop.Metrics[d]; !has {
@@ -463,18 +478,12 @@ func parseMetricResponse(instanceData gjson.Result, metric string) *metricRespon
 	for _, name := range t.Array() {
 		if name.String() == metric {
 			metricPath := "counters.#(name=" + metric + ")"
-			many := gjson.GetMany(instanceDataS,
-				metricPath+".value",
-				metricPath+".values",
-				metricPath+".labels",
-				metricPath+".counters.#.label",
-				metricPath+".counters.#.values",
-			)
-			value := many[0]
-			values := many[1]
-			labels := many[2]
-			subLabels := many[3]
-			subValues := many[4]
+			many := gjson.Parse(instanceDataS)
+			value := many.Get(metricPath + ".value")
+			values := many.Get(metricPath + ".values")
+			labels := many.Get(metricPath + ".labels")
+			subLabels := many.Get(metricPath + ".counters.#.label")
+			subValues := many.Get(metricPath + ".counters.#.values")
 			if value.String() != "" {
 				return &metricResponse{value: strings.Clone(value.String()), label: ""}
 			}
@@ -1443,7 +1452,13 @@ func (r *RestPerf) updateQosLabels(qos gjson.Result, instance *matrix.Instance,
 				r.Logger.Trace().Str("label", label).Str("key", key).Msg("Missing label")
 			}
 		}
-		r.Logger.Debug().Str("query", r.Prop.Query).Str("key", key).Str("qos labels", instance.GetLabels().String()).Send()
+		if r.Logger.GetLevel() == zerolog.DebugLevel {
+			r.Logger.Debug().
+				Str("query", r.Prop.Query).
+				Str("key", key).
+				Str("qos labels", dict.String(instance.GetLabels())).
+				Send()
+		}
 	}
 }
diff --git a/cmd/collectors/restperf/restperf_test.go b/cmd/collectors/restperf/restperf_test.go
index e6c48115c..04daff336 100644
--- a/cmd/collectors/restperf/restperf_test.go
+++ b/cmd/collectors/restperf/restperf_test.go
@@ -205,12 +205,12 @@ func TestRestPerf_pollData(t *testing.T) {

 func newRestPerf(object string, path string) *RestPerf {
 	var err error
-	opts := options.Options{
-		Poller:   pollerName,
-		HomePath: "testdata",
-		IsTest:   true,
-	}
-	ac := collector.New("RestPerf", object, &opts, params(object, path), nil)
+	opts := options.New(options.WithConfPath("testdata/conf"))
+	opts.Poller = pollerName
+	opts.HomePath = "testdata"
+	opts.IsTest = true
+
+	ac := collector.New("RestPerf", object, opts, params(object, path), nil)
 	r := RestPerf{}
 	err = r.Init(ac)
 	if err != nil {
diff --git a/cmd/collectors/zapi/plugins/sensor/sensor_test.go b/cmd/collectors/sensor_test.go
similarity index 74%
rename from cmd/collectors/zapi/plugins/sensor/sensor_test.go
rename to cmd/collectors/sensor_test.go
index 576585f9f..9b233f23d 100644
--- a/cmd/collectors/zapi/plugins/sensor/sensor_test.go
+++ b/cmd/collectors/sensor_test.go
@@ -1,27 +1,39 @@
-package sensor
+package collectors

 import (
+	"fmt"
 	"github.com/netapp/harvest/v2/cmd/poller/plugin"
-	"github.com/netapp/harvest/v2/pkg/dict"
 	"github.com/netapp/harvest/v2/pkg/logging"
 	"github.com/netapp/harvest/v2/pkg/matrix"
 	"github.com/netapp/harvest/v2/pkg/tree"
 	"github.com/netapp/harvest/v2/pkg/tree/node"
 	"os"
+	"path/filepath"
 	"strings"
 	"testing"
 )

-var testxml = "../../../../../cmd/collectors/zapi/plugins/sensor/testdata/sensor.xml"
+var testxml = "testdata/sensor.xml"
 var mat *matrix.Matrix
 var sensor = &Sensor{AbstractPlugin: plugin.New("sensor", nil, nil, nil, "sensor", nil)}

-func init() {
-	//setup matrix data
+func TestMain(m *testing.M) {
+	loadTestdata()
+	os.Exit(m.Run())
+}
+
+func loadTestdata() {
+	// setup matrix data
 	var err error
 	var fetch func(*matrix.Instance, *node.Node, []string)
-	dat, _ := os.ReadFile(testxml)
-	instanceLabelPaths := map[string]string{"environment-sensors-info.discrete-sensor-state": "discrete_state",
+	dat, err := os.ReadFile(testxml)
+	if err != nil {
abs, _ := filepath.Abs(testxml) + fmt.Printf("failed to load %s\n", abs) + panic(err) + } + instanceLabelPaths := map[string]string{ + "environment-sensors-info.discrete-sensor-state": "discrete_state", "environment-sensors-info.sensor-type": "type", "environment-sensors-info.threshold-sensor-state": "threshold_state", "environment-sensors-info.warning-high-threshold": "warning_high", @@ -55,7 +67,10 @@ func init() { _, _ = mat.NewMetricInt64("environment-sensors-info.critical-high-threshold") _, _ = mat.NewMetricInt64("environment-sensors-info.critical-low-threshold") _, _ = mat.NewMetricInt64("environment-sensors-info.threshold-sensor-value") - response, _ := tree.LoadXML(dat) + response, err := tree.LoadXML(dat) + if err != nil { + panic(err) + } instances := response.SearchChildren(shortestPathPrefix) for _, instanceElem := range instances { keys, found := instanceElem.SearchContent(shortestPathPrefix, instanceKeyPath) @@ -78,7 +93,7 @@ func init() { sensor.data = matrix.New("Sensor", "environment_sensor", "environment_sensor") sensor.instanceKeys = make(map[string]string) - sensor.instanceLabels = make(map[string]*dict.Dict) + sensor.instanceLabels = make(map[string]map[string]string) sensor.AbstractPlugin.Logger = logging.Get() for _, k := range eMetrics { @@ -91,34 +106,39 @@ func init() { // average_ambient_temperature is // cat cmd/collectors/zapi/plugins/sensor/testdata/sensor.xml | dasel -r xml -w json | jq -r '.root."attributes-list"."environment-sensors-info"[] | select(."sensor-type" | test("thermal")) | {node: (."node-name"), name: (."sensor-name"), value: (."threshold-sensor-value")} | [.node, .name, .value] | @csv' | rg "Ambient Temp|Ambient Temp \d|PSU\d AmbTemp|PSU\d Inlet|PSU\d Inlet Temp|In Flow Temp|Front Temp|Bat Ambient \d|Riser Inlet Temp" | rg -v "Fake" | mlr --csv --implicit-csv-header label node,name,value then stats1 -a min,mean,max -f value -g node | mlr --csv --opprint --barred cat -//+------------+-----------+------------+-----------+ -//| node | value_min | value_mean | value_max | -//+------------+-----------+------------+-----------+ -//| cdot-k3-05 | 21 | 22 | 23 | -//| cdot-k3-06 | 21 | 22.5 | 24 | -//| cdot-k3-07 | 21 | 22 | 23 | -//| cdot-k3-08 | 21 | 22.5 | 24 | -//+------------+-----------+------------+-----------+ +// +------------+-----------+------------+-----------+ +// | node | value_min | value_mean | value_max | +// +------------+-----------+------------+-----------+ +// | cdot-k3-05 | 21 | 22 | 23 | +// | cdot-k3-06 | 21 | 22.5 | 24 | +// | cdot-k3-07 | 21 | 22 | 23 | +// | cdot-k3-08 | 21 | 22.5 | 24 | +// +------------+-----------+------------+-----------+ // // average_temperature [min, avg, max] is calculated like so // cat cmd/collectors/zapi/plugins/sensor/testdata/sensor.xml | dasel -r xml -w json | jq -r '.root."attributes-list"."environment-sensors-info"[] | select(."sensor-type" | test("thermal")) | {node: (."node-name"), name: (."sensor-name"), value: (."threshold-sensor-value")} | [.node, .name, .value] | @csv' | rg -v "Ambient Temp|Ambient Temp \d|PSU\d AmbTemp|PSU\d Inlet|PSU\d Inlet Temp|In Flow Temp|Front Temp|Bat Ambient \d|Riser Inlet Temp" | rg -v "Fake" | mlr --csv --implicit-csv-header label node,name,value then stats1 -a min,mean,max -f value -g node | mlr --csv --opprint --barred cat -//+------------+-----------+--------------------+-----------+ -//| node | value_min | value_mean | value_max | -//+------------+-----------+--------------------+-----------+ -//| cdot-k3-05 | 19 | 26.823529411764707 | 36 | -//| cdot-k3-06 
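The failure path above resolves the relative fixture path to an absolute one before panicking, so a wrong working directory is obvious from the message. The same pattern in isolation, with a hypothetical helper name:

package fixtures

import (
	"fmt"
	"os"
	"path/filepath"
)

// mustRead mirrors the pattern above: on failure, report the absolute
// path (far more useful than the relative one) and then panic.
func mustRead(rel string) []byte {
	dat, err := os.ReadFile(rel)
	if err != nil {
		abs, _ := filepath.Abs(rel)
		fmt.Printf("failed to load %s\n", abs)
		panic(err)
	}
	return dat
}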
| 19 | 26.352941176470587 | 35 | -//| cdot-k3-07 | 19 | 26.352941176470587 | 35 | -//| cdot-k3-08 | 20 | 27.176470588235293 | 36 | -//+------------+-----------+--------------------+-----------+ +// +------------+-----------+--------------------+-----------+ +// | node | value_min | value_mean | value_max | +// +------------+-----------+--------------------+-----------+ +// | cdot-k3-05 | 19 | 26.823529411764707 | 36 | +// | cdot-k3-06 | 19 | 26.352941176470587 | 35 | +// | cdot-k3-07 | 19 | 26.352941176470587 | 35 | +// | cdot-k3-08 | 20 | 27.176470588235293 | 36 | +// +------------+-----------+--------------------+-----------+ func TestSensor_Run(t *testing.T) { - - dataMap := map[string]*matrix.Matrix{ - mat.Object: mat, + nodeToNumNode := map[string]int{ + "cdot-k3-05": 1, + "cdot-k3-06": 1, + "cdot-k3-07": 1, + "cdot-k3-08": 1, + } + omat, err := calculateEnvironmentMetrics(mat, logging.Get(), zapiValueKey, sensor.data, nodeToNumNode) + if err != nil { + t.Errorf("got err %v", err) } - omat, _ := sensor.Run(dataMap) expected := map[string]map[string]float64{ "average_ambient_temperature": {"cdot-k3-05": 22, "cdot-k3-06": 22.5, "cdot-k3-07": 22, "cdot-k3-08": 22.5}, @@ -134,6 +154,9 @@ func TestSensor_Run(t *testing.T) { for _, k := range eMetrics { metrics := omat[0].GetMetrics() + if len(omat[0].GetInstances()) == 0 { + t.Errorf("got no instances") + } for iKey, v := range omat[0].GetInstances() { got, _ := metrics[k].GetValueFloat64(v) exp := expected[k][iKey] diff --git a/cmd/collectors/storagegrid/plugins/joinrest/joinrest.go b/cmd/collectors/storagegrid/plugins/joinrest/joinrest.go index d4f6fa12a..1e8e23967 100644 --- a/cmd/collectors/storagegrid/plugins/joinrest/joinrest.go +++ b/cmd/collectors/storagegrid/plugins/joinrest/joinrest.go @@ -131,9 +131,9 @@ func (t *JoinRest) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, err } func (t *JoinRest) updateCache(model join, bytes *[]byte) { - results := gjson.GetManyBytes(*bytes, "data.#."+model.JoinRest, "data.#."+model.LabelRest) - keys := results[0].Array() - vals := results[1].Array() + results := gjson.ParseBytes(*bytes) + keys := results.Get("data.#." + model.JoinRest).Array() + vals := results.Get("data.#." + model.LabelRest).Array() if len(keys) != len(vals) { t.Logger.Error(). Str("restKey", model.JoinRest). diff --git a/cmd/collectors/storagegrid/rest/client.go b/cmd/collectors/storagegrid/rest/client.go index d8feb4d08..c2af63765 100644 --- a/cmd/collectors/storagegrid/rest/client.go +++ b/cmd/collectors/storagegrid/rest/client.go @@ -143,8 +143,8 @@ func (c *Client) Fetch(request string, result *[]gjson.Result) error { return fmt.Errorf("error making request %w", err) } - output := gjson.GetManyBytes(fetched, "data") - data = output[0] + output := gjson.ParseBytes(fetched) + data = output.Get("data") for _, r := range data.Array() { *result = append(*result, r.Array()...) } @@ -172,8 +172,8 @@ func (c *Client) GetMetricQuery(metric string, result *[]gjson.Result) error { if err != nil { return err } - output := gjson.GetManyBytes(fetched, "data") - data := output[0] + output := gjson.ParseBytes(fetched) + data := output.Get("data") for _, r := range data.Array() { *result = append(*result, r.Array()...) 
} @@ -280,8 +280,8 @@ func (c *Client) Init(retries int) error { if content, err = c.GetGridRest("grid/config/product-version"); err != nil { continue } - results := gjson.GetManyBytes(content, "data.productVersion") - err = c.SetVersion(results[0].String()) + results := gjson.ParseBytes(content) + err = c.SetVersion(results.Get("data.productVersion").String()) if err != nil { return err } @@ -289,14 +289,15 @@ func (c *Client) Init(retries int) error { if content, err = c.GetGridRest("grid/health/topology?depth=grid"); err != nil { continue } - results = gjson.GetManyBytes(content, "data.name") - c.Cluster.Name = strings.ReplaceAll(results[0].String(), " ", "_") + + results = gjson.ParseBytes(content) + c.Cluster.Name = strings.ReplaceAll(results.Get("data.name").String(), " ", "_") if content, err = c.GetGridRest("grid/license"); err != nil { continue } - results = gjson.GetManyBytes(content, "data.systemId") - c.Cluster.UUID = results[0].String() + results = gjson.ParseBytes(content) + c.Cluster.UUID = results.Get("data.systemId").String() return nil } @@ -377,9 +378,9 @@ func (c *Client) fetchTokenWithAuthRetry() error { return errs.NewStorageGridErr(response.StatusCode, body) } - results := gjson.GetManyBytes(body, "data", "message.text") - token := results[0] - errorMsg := results[1] + results := gjson.ParseBytes(body) + token := results.Get("data") + errorMsg := results.Get("message.text") if token.Exists() { c.token = token.String() diff --git a/cmd/collectors/storagegrid/storagegrid.go b/cmd/collectors/storagegrid/storagegrid.go index 1d20fa6da..bd189b8e4 100644 --- a/cmd/collectors/storagegrid/storagegrid.go +++ b/cmd/collectors/storagegrid/storagegrid.go @@ -449,24 +449,18 @@ func (s *StorageGrid) InitProp() { func (s *StorageGrid) LoadTemplate() (string, error) { var ( - template *node.Node - templatePath string - err error + template *node.Node + path string + err error ) - // import template - - template, templatePath, err = s.ImportSubTemplate( - "", - rest.TemplateFn(s.Params, s.Object), - s.client.Cluster.Version, - ) + template, path, err = s.ImportSubTemplate("", rest.TemplateFn(s.Params, s.Object), s.client.Cluster.Version) if err != nil { return "", err } s.Params.Union(template) - return templatePath, nil + return path, nil } func (s *StorageGrid) LoadPlugin(kind string, abc *plugin.AbstractPlugin) plugin.Plugin { @@ -503,7 +497,7 @@ func (s *StorageGrid) CollectAutoSupport(p *collector.Payload) { exporterTypes = append(exporterTypes, exporter.GetClass()) } - var counters = make([]string, 0) + var counters = make([]string, 0, len(s.Props.Counters)) for k := range s.Props.Counters { counters = append(counters, k) } @@ -520,6 +514,16 @@ func (s *StorageGrid) CollectAutoSupport(p *collector.Payload) { } // Add collector information + md := s.GetMetadata() + info := collector.InstanceInfo{ + Count: md.LazyValueInt64("instances", "data"), + DataPoints: md.LazyValueInt64("metrics", "data"), + PollTime: md.LazyValueInt64("poll_time", "data"), + APITime: md.LazyValueInt64("api_time", "data"), + ParseTime: md.LazyValueInt64("parse_time", "data"), + PluginTime: md.LazyValueInt64("plugin_time", "data"), + } + p.AddCollectorAsup(collector.AsupCollector{ Name: s.Name, Query: s.Props.Query, @@ -530,6 +534,7 @@ func (s *StorageGrid) CollectAutoSupport(p *collector.Payload) { }, Schedules: schedules, ClientTimeout: s.client.Timeout.String(), + InstanceInfo: &info, }) version := s.client.Cluster.Version @@ -551,15 +556,6 @@ func (s *StorageGrid) CollectAutoSupport(p 
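The fetchTokenWithAuthRetry change above parses the response body once and then branches on whether the token field exists. The same branching in a standalone sketch, with a hypothetical error body:

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// Hypothetical failure body; a success body would carry a "data" token.
	body := []byte(`{"message":{"text":"authentication failed"}}`)

	res := gjson.ParseBytes(body)
	if token := res.Get("data"); token.Exists() {
		fmt.Println("token:", token.String())
	} else {
		fmt.Println("login failed:", res.Get("message.text").String())
	}
}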
*collector.Payload) { } if s.Object == "Tenant" { - md := s.GetMetadata() - info := collector.InstanceInfo{ - Count: md.LazyValueInt64("instances", "data"), - DataPoints: md.LazyValueInt64("metrics", "data"), - PollTime: md.LazyValueInt64("poll_time", "data"), - APITime: md.LazyValueInt64("api_time", "data"), - ParseTime: md.LazyValueInt64("parse_time", "data"), - PluginTime: md.LazyValueInt64("plugin_time", "data"), - } p.Tenants = &info } } diff --git a/cmd/collectors/zapi/plugins/sensor/testdata/sensor.xml b/cmd/collectors/testdata/sensor.xml similarity index 100% rename from cmd/collectors/zapi/plugins/sensor/testdata/sensor.xml rename to cmd/collectors/testdata/sensor.xml diff --git a/cmd/collectors/unix/main.go b/cmd/collectors/unix/main.go index badd567cd..8739c23cc 100644 --- a/cmd/collectors/unix/main.go +++ b/cmd/collectors/unix/main.go @@ -280,7 +280,7 @@ func (u *Unix) PollInstance() (map[string]*matrix.Matrix, error) { currInstances := set.NewFrom(mat.GetInstanceKeys()) currSize := currInstances.Size() - err := conf.LoadHarvestConfig(u.Options.Config) + _, err := conf.LoadHarvestConfig(u.Options.Config) if err != nil { return nil, err } diff --git a/cmd/collectors/zapi/collector/zapi.go b/cmd/collectors/zapi/collector/zapi.go index 6436c01e4..22ce1681c 100644 --- a/cmd/collectors/zapi/collector/zapi.go +++ b/cmd/collectors/zapi/collector/zapi.go @@ -6,13 +6,13 @@ package zapi import ( "fmt" + "github.com/netapp/harvest/v2/cmd/collectors" "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/aggregate" "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/certificate" "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/qospolicyadaptive" "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/qospolicyfixed" "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/qtree" "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/security" - "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/sensor" "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/shelf" "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/snapmirror" "github.com/netapp/harvest/v2/cmd/collectors/zapi/plugins/svm" @@ -85,21 +85,20 @@ func (z *Zapi) Init(a *collector.AbstractCollector) error { } func (z *Zapi) InitVars() error { - var err error - // It's used for unit tests only if z.Options.IsTest { z.Client = client.NewTestClient() templateName := z.Params.GetChildS("objects").GetChildContentS(z.Object) - template, templatePath, err := z.ImportSubTemplate("cdot", templateName, [3]int{9, 8, 0}) + template, path, err := z.ImportSubTemplate("cdot", templateName, [3]int{9, 8, 0}) if err != nil { - return fmt.Errorf("unable to import template=[%s] %w", templatePath, err) + return err } - z.TemplatePath = templatePath + z.TemplatePath = path z.Params.Union(template) return nil } + var err error if z.Client, err = client.New(conf.ZapiPoller(z.Params), z.Auth); err != nil { // convert to connection error, so poller aborts return errs.New(errs.ErrConnection, err.Error()) } @@ -122,12 +121,12 @@ func (z *Zapi) InitVars() error { z.HostModel = model templateName := z.Params.GetChildS("objects").GetChildContentS(z.Object) - template, templatePath, err := z.ImportSubTemplate(model, templateName, z.Client.Version()) + template, path, err := z.ImportSubTemplate(model, templateName, z.Client.Version()) if err != nil { - return fmt.Errorf("unable to import template=[%s] %w", templatePath, err) + return err } - z.TemplatePath = templatePath + z.TemplatePath = path z.Params.Union(template) @@ 
-159,7 +158,7 @@ func (z *Zapi) LoadPlugin(kind string, abc *plugin.AbstractPlugin) plugin.Plugin case "Volume": return volume.New(abc) case "Sensor": - return sensor.New(abc) + return collectors.NewSensor(abc) case "Certificate": return certificate.New(abc) case "SVM": @@ -285,7 +284,9 @@ func (z *Zapi) PollData() (map[string]*matrix.Matrix, error) { // Handling array with comma separated values previousValue := instance.GetLabel(label) if isAppend && previousValue != "" { - instance.SetLabel(label, previousValue+","+value) + currentVal := strings.Split(previousValue+","+value, ",") + sort.Strings(currentVal) + instance.SetLabel(label, strings.Join(currentVal, ",")) z.Logger.Trace().Msgf(" > %slabel (%s) [%s] set value (%s)%s", color.Yellow, key, label, instance.GetLabel(label)+","+value, color.End) } else { instance.SetLabel(label, value) @@ -375,12 +376,12 @@ func (z *Zapi) PollData() (map[string]*matrix.Matrix, error) { } for _, instanceElem := range instances { - //c.logger.Printf(c.Prefix, "Handling instance element <%v> [%s]", &instance, instance.GetName()) + // c.logger.Printf(c.Prefix, "Handling instance element <%v> [%s]", &instance, instance.GetName()) keys, found := instanceElem.SearchContent(z.shortestPathPrefix, z.instanceKeyPaths) - //logger.Debug(z.Prefix, "Fetched instance keys: %s", strings.Join(keys, ".")) + // logger.Debug(z.Prefix, "Fetched instance keys: %s", strings.Join(keys, ".")) if !found { - //logger.Debug(z.Prefix, "Skipping instance: no keys fetched") + // logger.Debug(z.Prefix, "Skipping instance: no keys fetched") continue } @@ -451,6 +452,16 @@ func (z *Zapi) CollectAutoSupport(p *collector.Payload) { } // Add collector information + md := z.GetMetadata() + info := collector.InstanceInfo{ + Count: md.LazyValueInt64("instances", "data"), + DataPoints: md.LazyValueInt64("metrics", "data"), + PollTime: md.LazyValueInt64("poll_time", "data"), + APITime: md.LazyValueInt64("api_time", "data"), + ParseTime: md.LazyValueInt64("parse_time", "data"), + PluginTime: md.LazyValueInt64("plugin_time", "data"), + } + p.AddCollectorAsup(collector.AsupCollector{ Name: z.Name, Query: z.Query, @@ -462,6 +473,7 @@ func (z *Zapi) CollectAutoSupport(p *collector.Payload) { }, Schedules: schedules, ClientTimeout: clientTimeout, + InstanceInfo: &info, }) if z.Name == "Zapi" && (z.Object == "Volume" || z.Object == "Node") { @@ -472,16 +484,6 @@ func (z *Zapi) CollectAutoSupport(p *collector.Payload) { } p.Target.ClusterUUID = z.Client.ClusterUUID() - md := z.GetMetadata() - info := collector.InstanceInfo{ - Count: md.LazyValueInt64("instances", "data"), - DataPoints: md.LazyValueInt64("metrics", "data"), - PollTime: md.LazyValueInt64("poll_time", "data"), - APITime: md.LazyValueInt64("api_time", "data"), - ParseTime: md.LazyValueInt64("parse_time", "data"), - PluginTime: md.LazyValueInt64("plugin_time", "data"), - } - if z.Object == "Node" { var ( nodeIds []collector.ID diff --git a/cmd/collectors/zapi/plugins/aggregate/aggregate.go b/cmd/collectors/zapi/plugins/aggregate/aggregate.go index e1b07b408..cc4105957 100644 --- a/cmd/collectors/zapi/plugins/aggregate/aggregate.go +++ b/cmd/collectors/zapi/plugins/aggregate/aggregate.go @@ -52,18 +52,47 @@ func (a *Aggregate) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, er if err := a.getCloudStores(); err != nil { if errors.Is(err, errs.ErrNoInstance) { a.Logger.Debug().Err(err).Msg("Failed to collect cloud store data") - return nil, nil } - return nil, err + } + + aggrFootprintMap, err := a.getAggrFootprint() + if err != nil 
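The PollData change above keeps comma-separated array labels in a canonical order, so the same set of values always renders the same label string across polls, avoiding spurious label churn downstream. A sketch of just that transformation:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// appendSorted mirrors the label-append change above: merge the new value,
// then sort, so "nodeB,nodeA" and "nodeA,nodeB" render as one stable string.
func appendSorted(previous, value string) string {
	parts := strings.Split(previous+","+value, ",")
	sort.Strings(parts)
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(appendSorted("nodeB", "nodeA")) // nodeA,nodeB
}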
{ + a.Logger.Error().Err(err).Msg("Failed to update footprint data") + // clear the map in case of an error + clear(aggrFootprintMap) } // update aggregate instance label with cloud stores info - if len(a.aggrCloudStoresMap) > 0 { - for uuid, aggr := range data.GetInstances() { - if !aggr.IsExportable() { - continue + for aggrUUID, aggr := range data.GetInstances() { + if !aggr.IsExportable() { + continue + } + aggr.SetLabel("cloud_stores", strings.Join(a.aggrCloudStoresMap[aggrUUID], ",")) + + // Handling aggr footprint metrics + aggrName := aggr.GetLabel("aggr") + if af, ok := aggrFootprintMap[aggrName]; ok { + for afKey, afVal := range af { + vfMetric := data.GetMetric(afKey) + if vfMetric == nil { + if vfMetric, err = data.NewMetricFloat64(afKey); err != nil { + a.Logger.Error().Err(err).Str("metric", afKey).Msg("add metric") + continue + } + } + + if afVal != "" { + vfMetricVal, err := strconv.ParseFloat(afVal, 64) + if err != nil { + a.Logger.Error().Err(err).Str(afKey, afVal).Msg("parse") + continue + } + if err = vfMetric.SetValueFloat64(aggr, vfMetricVal); err != nil { + a.Logger.Error().Err(err).Str(afKey, afVal).Msg("set") + continue + } + } } - aggr.SetLabel("cloud_stores", strings.Join(a.aggrCloudStoresMap[uuid], ",")) } } return nil, nil @@ -124,3 +153,44 @@ func (a *Aggregate) getCloudStores() error { } return nil } + +func (a *Aggregate) getAggrFootprint() (map[string]map[string]string, error) { + var ( + result []*node.Node + aggrFootprintMap map[string]map[string]string + err error + ) + + aggrFootprintMap = make(map[string]map[string]string) + request := node.NewXMLS("aggr-space-get-iter") + request.NewChildS("max-records", collectors.DefaultBatchSize) + desired := node.NewXMLS("desired-attributes") + spaceInfo := node.NewXMLS("space-information") + spaceInfo.NewChildS("aggregate", "") + spaceInfo.NewChildS("volume-footprints", "") + spaceInfo.NewChildS("volume-footprints-percent", "") + desired.AddChild(spaceInfo) + request.AddChild(desired) + + if result, err = a.client.InvokeZapiCall(request); err != nil { + return nil, err + } + + if len(result) == 0 { + return aggrFootprintMap, nil + } + + for _, footprint := range result { + footprintMetrics := make(map[string]string) + aggr := footprint.GetChildContentS("aggregate") + performanceTierUsed := footprint.GetChildContentS("volume-footprints") + performanceTierUsedPerc := footprint.GetChildContentS("volume-footprints-percent") + if performanceTierUsed != "" || performanceTierUsedPerc != "" { + footprintMetrics["space_performance_tier_used"] = performanceTierUsed + footprintMetrics["space_performance_tier_used_percent"] = performanceTierUsedPerc + aggrFootprintMap[aggr] = footprintMetrics + } + } + + return aggrFootprintMap, nil +} diff --git a/cmd/collectors/zapi/plugins/qtree/qtree.go b/cmd/collectors/zapi/plugins/qtree/qtree.go index 999f04944..af8cfac88 100644 --- a/cmd/collectors/zapi/plugins/qtree/qtree.go +++ b/cmd/collectors/zapi/plugins/qtree/qtree.go @@ -6,7 +6,6 @@ import ( "github.com/netapp/harvest/v2/cmd/poller/plugin" "github.com/netapp/harvest/v2/pkg/api/ontapi/zapi" "github.com/netapp/harvest/v2/pkg/conf" - "github.com/netapp/harvest/v2/pkg/dict" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/matrix" "github.com/netapp/harvest/v2/pkg/tree/node" @@ -23,7 +22,7 @@ type Qtree struct { *plugin.AbstractPlugin data *matrix.Matrix instanceKeys map[string]string - instanceLabels map[string]*dict.Dict + instanceLabels map[string]map[string]string batchSize string client *zapi.Client
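Both footprint error paths above use the Go 1.21 clear builtin, which empties a map in place rather than reallocating it; clearing a nil map is a no-op, so the call is safe even when the fetch returned nothing. Its semantics in miniature:

package main

import "fmt"

func main() {
	m := map[string]map[string]string{
		"aggr1": {"space_performance_tier_used": "100"},
	}
	// clear deletes every entry but keeps the map non-nil, so later lookups
	// simply miss; clear on a nil map would be a no-op rather than a panic.
	clear(m)
	fmt.Println(len(m), m["aggr1"] == nil) // 0 true
}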
query string @@ -61,7 +60,7 @@ func (q *Qtree) Init() error { q.data = matrix.New(q.Parent+".Qtree", "quota", "quota") q.instanceKeys = make(map[string]string) - q.instanceLabels = make(map[string]*dict.Dict) + q.instanceLabels = make(map[string]map[string]string) q.historicalLabels = false if q.Params.HasChildS("historicalLabels") { @@ -70,7 +69,7 @@ func (q *Qtree) Init() error { // apply all instance keys, instance labels from parent (qtree.yaml) to all quota metrics if exportOption := q.ParentParams.GetChildS("export_options"); exportOption != nil { - //parent instancekeys would be added in plugin metrics + // parent instancekeys would be added in plugin metrics if parentKeys := exportOption.GetChildS("instance_keys"); parentKeys != nil { for _, parentKey := range parentKeys.GetAllChildContentS() { instanceKeys.NewChildS("", parentKey) @@ -173,7 +172,7 @@ func (q *Qtree) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) } } - cluster, _ := data.GetGlobalLabels().GetHas("cluster") + cluster := data.GetGlobalLabels()["cluster"] for { response, tag, ad, pd, err = q.client.InvokeBatchWithTimers(request, tag) @@ -291,7 +290,7 @@ func (q *Qtree) handlingHistoricalMetrics(quotas []*node.Node, data *matrix.Matr } } - //set labels + // set labels quotaInstance.SetLabel("type", quotaType) quotaInstance.SetLabel("qtree", tree) quotaInstance.SetLabel("volume", volume) @@ -385,7 +384,7 @@ func (q *Qtree) handlingQuotaMetrics(quotas []*node.Node, cluster string, quotaI q.Logger.Debug().Msgf("add (%s) instance: %v", attribute, err) return err } - //set labels + // set labels quotaInstance.SetLabel("type", quotaType) quotaInstance.SetLabel("qtree", tree) quotaInstance.SetLabel("volume", volume) diff --git a/cmd/collectors/zapi/plugins/sensor/sensor.go b/cmd/collectors/zapi/plugins/sensor/sensor.go deleted file mode 100644 index 1b17dbaf3..000000000 --- a/cmd/collectors/zapi/plugins/sensor/sensor.go +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Copyright NetApp Inc, 2021 All rights reserved - */ - -package sensor - -import ( - "fmt" - "github.com/netapp/harvest/v2/cmd/collectors" - "github.com/netapp/harvest/v2/cmd/poller/plugin" - "github.com/netapp/harvest/v2/pkg/dict" - "github.com/netapp/harvest/v2/pkg/matrix" - "github.com/netapp/harvest/v2/pkg/util" - "regexp" - "sort" - "strings" -) - -type Sensor struct { - *plugin.AbstractPlugin - data *matrix.Matrix - instanceKeys map[string]string - instanceLabels map[string]*dict.Dict -} - -type sensorEnvironmentMetric struct { - key string - ambientTemperature []float64 - nonAmbientTemperature []float64 - fanSpeed []float64 - powerSensor map[string]*sensorValue - voltageSensor map[string]*sensorValue - currentSensor map[string]*sensorValue -} - -type sensorValue struct { - name string - value float64 - unit string -} - -func New(p *plugin.AbstractPlugin) plugin.Plugin { - return &Sensor{AbstractPlugin: p} -} - -var ambientRegex = regexp.MustCompile(`^(Ambient Temp|Ambient Temp \d|PSU\d AmbTemp|PSU\d Inlet|PSU\d Inlet Temp|In Flow Temp|Front Temp|Bat_Ambient \d|Riser Inlet Temp)$`) -var powerInRegex = regexp.MustCompile(`^PSU\d (InPwr Monitor|InPower|PIN|Power In)$`) -var voltageRegex = regexp.MustCompile(`^PSU\d (\d+V|InVoltage|VIN|AC In Volt)$`) -var currentRegex = regexp.MustCompile(`^PSU\d (\d+V Curr|Curr|InCurrent|Curr IIN|AC In Curr)$`) -var eMetrics = []string{ - "average_ambient_temperature", - "average_fan_speed", - "average_temperature", - "max_fan_speed", - "max_temperature", - "min_ambient_temperature", - "min_fan_speed", - 
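The qtree change above is one instance of a repo-wide migration from the dict.Dict wrapper to plain maps; the built-in map operations cover everything the wrapper did. Rough equivalences, inferred from the call sites in this diff:

package main

import "fmt"

func main() {
	instanceLabels := make(map[string]map[string]string)
	instanceLabels["shelf"] = make(map[string]string) // was dict.New()
	instanceLabels["shelf"]["shelf-id"] = "id"        // was d.Set(k, v)

	if display, ok := instanceLabels["shelf"]["shelf-id"]; ok { // was d.GetHas(k)
		fmt.Println(display)
	}
	for name, display := range instanceLabels["shelf"] { // was d.Map()
		fmt.Println(name, display)
	}
}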
"min_temperature", - "power", -} - -func (my *Sensor) Init() error { - if err := my.InitAbc(); err != nil { - return err - } - - my.data = matrix.New(my.Parent+".Sensor", "environment_sensor", "environment_sensor") - my.instanceKeys = make(map[string]string) - my.instanceLabels = make(map[string]*dict.Dict) - - // init environment metrics in plugin matrix - // create environment metric if not exists - for _, k := range eMetrics { - err := matrix.CreateMetric(k, my.data) - if err != nil { - my.Logger.Warn().Err(err).Str("key", k).Msg("error while creating metric") - } - } - return nil -} - -func (my *Sensor) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) { - data := dataMap[my.Object] - // Purge and reset data - my.data.PurgeInstances() - my.data.Reset() - - // Set all global labels from zapi.go if already not exist - my.data.SetGlobalLabels(data.GetGlobalLabels()) - - return my.calculateEnvironmentMetrics(data) -} - -func (my *Sensor) calculateEnvironmentMetrics(data *matrix.Matrix) ([]*matrix.Matrix, error) { - sensorEnvironmentMetricMap := make(map[string]*sensorEnvironmentMetric) - excludedSensors := make(map[string][]sensorValue) - - for k, instance := range data.GetInstances() { - if !instance.IsExportable() { - continue - } - iKey := instance.GetLabel("node") - if iKey == "" { - my.Logger.Warn().Str("key", k).Msg("missing node label for instance") - continue - } - _, iKey2, found := strings.Cut(k, iKey+".") - if !found { - my.Logger.Warn().Str("key", iKey+".").Msg("missing instance key") - continue - } - if _, ok := sensorEnvironmentMetricMap[iKey]; !ok { - sensorEnvironmentMetricMap[iKey] = &sensorEnvironmentMetric{key: iKey, ambientTemperature: []float64{}, nonAmbientTemperature: []float64{}, fanSpeed: []float64{}} - } - for mKey, metric := range data.GetMetrics() { - if mKey == "environment-sensors-info.threshold-sensor-value" { - sensorType := instance.GetLabel("type") - sensorName := instance.GetLabel("sensor") - sensorUnit := instance.GetLabel("unit") - - isAmbientMatch := ambientRegex.MatchString(sensorName) - isPowerMatch := powerInRegex.MatchString(sensorName) - isVoltageMatch := voltageRegex.MatchString(sensorName) - isCurrentMatch := currentRegex.MatchString(sensorName) - - my.Logger.Debug().Bool("isAmbientMatch", isAmbientMatch). - Bool("isPowerMatch", isPowerMatch). - Bool("isVoltageMatch", isVoltageMatch). - Bool("isCurrentMatch", isCurrentMatch). - Str("sensorType", sensorType). - Str("sensorUnit", sensorUnit). - Str("sensorName", sensorName). 
- Msg("") - - if sensorType == "thermal" && isAmbientMatch { - if value, ok := metric.GetValueFloat64(instance); ok { - sensorEnvironmentMetricMap[iKey].ambientTemperature = append(sensorEnvironmentMetricMap[iKey].ambientTemperature, value) - } - } - - if sensorType == "thermal" && !isAmbientMatch { - // Exclude temperature sensors that contains sensor name `Margin` and value < 0 - value, ok := metric.GetValueFloat64(instance) - if value > 0 && !strings.Contains(sensorName, "Margin") { - if ok { - sensorEnvironmentMetricMap[iKey].nonAmbientTemperature = append(sensorEnvironmentMetricMap[iKey].nonAmbientTemperature, value) - } - } else { - excludedSensors[iKey] = append(excludedSensors[iKey], sensorValue{ - name: sensorName, - value: value, - }) - } - } - - if sensorType == "fan" { - if value, ok := metric.GetValueFloat64(instance); ok { - sensorEnvironmentMetricMap[iKey].fanSpeed = append(sensorEnvironmentMetricMap[iKey].fanSpeed, value) - } - } - - if isPowerMatch { - if value, ok := metric.GetValueFloat64(instance); ok { - if !collectors.IsValidUnit(sensorUnit) { - my.Logger.Warn().Str("unit", sensorUnit).Float64("value", value).Msg("unknown power unit") - } else { - if sensorEnvironmentMetricMap[iKey].powerSensor == nil { - sensorEnvironmentMetricMap[iKey].powerSensor = make(map[string]*sensorValue) - } - sensorEnvironmentMetricMap[iKey].powerSensor[iKey2] = &sensorValue{name: iKey2, value: value, unit: sensorUnit} - } - } - } - - if isVoltageMatch { - if value, ok := metric.GetValueFloat64(instance); ok { - if sensorEnvironmentMetricMap[iKey].voltageSensor == nil { - sensorEnvironmentMetricMap[iKey].voltageSensor = make(map[string]*sensorValue) - } - sensorEnvironmentMetricMap[iKey].voltageSensor[iKey2] = &sensorValue{name: iKey2, value: value, unit: sensorUnit} - } - } - - if isCurrentMatch { - if value, ok := metric.GetValueFloat64(instance); ok { - if sensorEnvironmentMetricMap[iKey].currentSensor == nil { - sensorEnvironmentMetricMap[iKey].currentSensor = make(map[string]*sensorValue) - } - sensorEnvironmentMetricMap[iKey].currentSensor[iKey2] = &sensorValue{name: iKey2, value: value, unit: sensorUnit} - } - } - } - } - } - - if len(excludedSensors) > 0 { - var excludedSensorStr string - for k, v := range excludedSensors { - excludedSensorStr += " node:" + k + " sensor:" + fmt.Sprintf("%v", v) - } - my.Logger.Info().Str("sensor", excludedSensorStr). 
- Msg("sensor excluded") - } - - whrSensors := make(map[string]*sensorValue) - - for key, v := range sensorEnvironmentMetricMap { - instance, err := my.data.NewInstance(key) - if err != nil { - my.Logger.Warn().Str("key", key).Msg("instance not found") - continue - } - // set node label - instance.SetLabel("node", key) - for _, k := range eMetrics { - m := my.data.GetMetric(k) - switch k { - case "power": - var sumPower float64 - if len(v.powerSensor) > 0 { - for _, v1 := range v.powerSensor { - if v1.unit == "mW" || v1.unit == "mW*hr" { - sumPower += v1.value / 1000 - } else if v1.unit == "W" || v1.unit == "W*hr" { - sumPower += v1.value - } else { - my.Logger.Warn().Str("node", key).Str("name", v1.name).Str("unit", v1.unit).Float64("value", v1.value).Msg("unknown power unit") - } - if v1.unit == "mW*hr" || v1.unit == "W*hr" { - whrSensors[v1.name] = v1 - } - } - } else if len(v.voltageSensor) > 0 && len(v.voltageSensor) == len(v.currentSensor) { - // sort voltage keys - voltageKeys := make([]string, 0, len(v.voltageSensor)) - for k := range v.voltageSensor { - voltageKeys = append(voltageKeys, k) - } - sort.Strings(voltageKeys) - - // sort current keys - currentKeys := make([]string, 0, len(v.currentSensor)) - for k := range v.currentSensor { - currentKeys = append(currentKeys, k) - } - sort.Strings(currentKeys) - - for i := range currentKeys { - currentKey := currentKeys[i] - voltageKey := voltageKeys[i] - - // get values - currentSensorValue := v.currentSensor[currentKey] - voltageSensorValue := v.voltageSensor[voltageKey] - - // convert units - if currentSensorValue.unit == "mA" { - currentSensorValue.value = currentSensorValue.value / 1000 - } else if currentSensorValue.unit != "A" { - my.Logger.Warn().Str("node", key).Str("unit", currentSensorValue.unit).Float64("value", currentSensorValue.value).Msg("unknown current unit") - } - - if voltageSensorValue.unit == "mV" { - voltageSensorValue.value = voltageSensorValue.value / 1000 - } else if voltageSensorValue.unit != "V" { - my.Logger.Warn().Str("node", key).Str("unit", voltageSensorValue.unit).Float64("value", voltageSensorValue.value).Msg("unknown voltage unit") - } - - p := currentSensorValue.value * voltageSensorValue.value - - if !strings.EqualFold(voltageSensorValue.name, "in") && !strings.EqualFold(currentSensorValue.name, "in") { - p = p / 0.93 // If the sensor names to do NOT contain "IN" or "in", then we need to adjust the power to account for loss in the power supply. We will use 0.93 as the power supply efficiency factor for all systems. 
- } - - sumPower += p - } - } else { - my.Logger.Warn().Str("node", key).Int("current size", len(v.currentSensor)).Int("voltage size", len(v.voltageSensor)).Msg("current and voltage sensor are ignored") - } - - err = m.SetValueFloat64(instance, sumPower) - if err != nil { - my.Logger.Error().Float64("power", sumPower).Err(err).Msg("Unable to set power") - } - - case "average_ambient_temperature": - if len(v.ambientTemperature) > 0 { - aaT := util.Avg(v.ambientTemperature) - err = m.SetValueFloat64(instance, aaT) - if err != nil { - my.Logger.Error().Float64("average_ambient_temperature", aaT).Err(err).Msg("Unable to set average_ambient_temperature") - } - } - case "min_ambient_temperature": - maT := util.Min(v.ambientTemperature) - err = m.SetValueFloat64(instance, maT) - if err != nil { - my.Logger.Error().Float64("min_ambient_temperature", maT).Err(err).Msg("Unable to set min_ambient_temperature") - } - case "max_temperature": - mT := util.Max(v.nonAmbientTemperature) - err = m.SetValueFloat64(instance, mT) - if err != nil { - my.Logger.Error().Float64("max_temperature", mT).Err(err).Msg("Unable to set max_temperature") - } - case "average_temperature": - if len(v.nonAmbientTemperature) > 0 { - nat := util.Avg(v.nonAmbientTemperature) - err = m.SetValueFloat64(instance, nat) - if err != nil { - my.Logger.Error().Float64("average_temperature", nat).Err(err).Msg("Unable to set average_temperature") - } - } - case "min_temperature": - mT := util.Min(v.nonAmbientTemperature) - err = m.SetValueFloat64(instance, mT) - if err != nil { - my.Logger.Error().Float64("min_temperature", mT).Err(err).Msg("Unable to set min_temperature") - } - case "average_fan_speed": - if len(v.fanSpeed) > 0 { - afs := util.Avg(v.fanSpeed) - err = m.SetValueFloat64(instance, afs) - if err != nil { - my.Logger.Error().Float64("average_fan_speed", afs).Err(err).Msg("Unable to set average_fan_speed") - } - } - case "max_fan_speed": - mfs := util.Max(v.fanSpeed) - err = m.SetValueFloat64(instance, mfs) - if err != nil { - my.Logger.Error().Float64("max_fan_speed", mfs).Err(err).Msg("Unable to set max_fan_speed") - } - case "min_fan_speed": - mfs := util.Min(v.fanSpeed) - err = m.SetValueFloat64(instance, mfs) - if err != nil { - my.Logger.Error().Float64("min_fan_speed", mfs).Err(err).Msg("Unable to set min_fan_speed") - } - } - } - } - - if len(whrSensors) > 0 { - var whrSensorsStr string - for _, v := range whrSensors { - whrSensorsStr += " sensor:" + fmt.Sprintf("%v", *v) - } - my.Logger.Info().Str("sensor", whrSensorsStr). 
- Msg("sensor with *hr units") - } - - return []*matrix.Matrix{my.data}, nil -} diff --git a/cmd/collectors/zapi/plugins/shelf/shelf.go b/cmd/collectors/zapi/plugins/shelf/shelf.go index 0971326fa..6757b0fca 100644 --- a/cmd/collectors/zapi/plugins/shelf/shelf.go +++ b/cmd/collectors/zapi/plugins/shelf/shelf.go @@ -6,7 +6,6 @@ import ( "github.com/netapp/harvest/v2/cmd/poller/plugin" "github.com/netapp/harvest/v2/pkg/api/ontapi/zapi" "github.com/netapp/harvest/v2/pkg/conf" - "github.com/netapp/harvest/v2/pkg/dict" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/matrix" "github.com/netapp/harvest/v2/pkg/tree/node" @@ -21,7 +20,7 @@ type Shelf struct { data map[string]*matrix.Matrix shelfData *matrix.Matrix instanceKeys map[string]string - instanceLabels map[string]*dict.Dict + instanceLabels map[string]map[string]string shelfInstanceKeys []string shelfInstanceLabels []shelfInstanceLabel batchSize string @@ -68,7 +67,7 @@ func (my *Shelf) Init() error { my.data = make(map[string]*matrix.Matrix) my.instanceKeys = make(map[string]string) - my.instanceLabels = make(map[string]*dict.Dict) + my.instanceLabels = make(map[string]map[string]string) objects := my.Params.GetChildS("objects") if objects == nil { @@ -85,7 +84,7 @@ func (my *Shelf) Init() error { objectName = strings.TrimSpace(x[1]) } - my.instanceLabels[attribute] = dict.New() + my.instanceLabels[attribute] = make(map[string]string) my.data[attribute] = matrix.New(my.Parent+".Shelf", "shelf_"+objectName, "shelf_"+objectName) my.data[attribute].SetGlobalLabel("datacenter", my.ParentParams.GetChildContentS("datacenter")) @@ -108,11 +107,11 @@ func (my *Shelf) Init() error { switch kind { case "key": my.instanceKeys[attribute] = metricName - my.instanceLabels[attribute].Set(metricName, display) + my.instanceLabels[attribute][metricName] = display instanceKeys.NewChildS("", display) my.Logger.Debug().Msgf("added instance key: (%s) (%s) [%s]", attribute, x.GetNameS(), display) case "label": - my.instanceLabels[attribute].Set(metricName, display) + my.instanceLabels[attribute][metricName] = display instanceLabels.NewChildS("", display) my.Logger.Debug().Msgf("added instance label: (%s) (%s) [%s]", attribute, x.GetNameS(), display) case "float": @@ -307,7 +306,7 @@ func (my *Shelf) handle7Mode(data *matrix.Matrix, result []*node.Node) ([]*matri } my.Logger.Debug().Msgf("add (%s) instance: %s.%s", attribute, shelfID, key) - for label, labelDisplay := range my.instanceLabels[attribute].Map() { + for label, labelDisplay := range my.instanceLabels[attribute] { if value := obj.GetChildContentS(label); value != "" { instance.SetLabel(labelDisplay, value) } diff --git a/cmd/collectors/zapi/plugins/snapmirror/snapmirror.go b/cmd/collectors/zapi/plugins/snapmirror/snapmirror.go index 26f082d69..5d6a86456 100644 --- a/cmd/collectors/zapi/plugins/snapmirror/snapmirror.go +++ b/cmd/collectors/zapi/plugins/snapmirror/snapmirror.go @@ -56,7 +56,7 @@ func (my *SnapMirror) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, destUpdCount := 0 srcUpdCount := 0 - if cluster, ok := data.GetGlobalLabels().GetHas("cluster"); ok { + if cluster, ok := data.GetGlobalLabels()["cluster"]; ok { if err := my.getSVMPeerData(cluster); err != nil { return nil, err } diff --git a/cmd/collectors/zapi/plugins/volume/volume.go b/cmd/collectors/zapi/plugins/volume/volume.go index 66a5e5ec9..ad9cc9b7b 100644 --- a/cmd/collectors/zapi/plugins/volume/volume.go +++ b/cmd/collectors/zapi/plugins/volume/volume.go @@ -92,19 +92,25 @@ func (v *Volume) 
Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error } volumeCloneMap, err := v.getVolumeCloneInfo() - if err != nil { v.Logger.Error().Err(err).Msg("Failed to update clone data") } + volumeFootprintMap, err := v.getVolumeFootprint() + if err != nil { + v.Logger.Error().Err(err).Msg("Failed to update footprint data") + // clear the map in case of an error + clear(volumeFootprintMap) + } + // update volume instance labels - v.updateVolumeLabels(data, volumeCloneMap) + v.updateVolumeLabels(data, volumeCloneMap, volumeFootprintMap) v.currentVal++ return nil, nil } -func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeCloneMap map[string]volumeClone) { +func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeCloneMap map[string]volumeClone, volumeFootprintMap map[string]map[string]string) { var err error for _, volume := range data.GetInstances() { if !volume.IsExportable() { @@ -143,6 +149,31 @@ func (v *Volume) updateVolumeLabels(data *matrix.Matrix, volumeCloneMap map[stri continue } } + + // Handling volume footprint metrics + if vf, ok := volumeFootprintMap[key]; ok { + for vfKey, vfVal := range vf { + vfMetric := data.GetMetric(vfKey) + if vfMetric == nil { + if vfMetric, err = data.NewMetricFloat64(vfKey); err != nil { + v.Logger.Error().Err(err).Str("metric", vfKey).Msg("add metric") + continue + } + } + + if vfVal != "" { + vfMetricVal, err := strconv.ParseFloat(vfVal, 64) + if err != nil { + v.Logger.Error().Err(err).Str(vfKey, vfVal).Msg("parse") + continue + } + if err = vfMetric.SetValueFloat64(volume, vfMetricVal); err != nil { + v.Logger.Error().Err(err).Str(vfKey, vfVal).Msg("set") + continue + } + } + } + } } } @@ -186,6 +217,53 @@ func (v *Volume) getVolumeCloneInfo() (map[string]volumeClone, error) { return volumeCloneMap, nil } +func (v *Volume) getVolumeFootprint() (map[string]map[string]string, error) { + var ( + result []*node.Node + volumeFootprintMap map[string]map[string]string + err error + ) + + volumeFootprintMap = make(map[string]map[string]string) + request := node.NewXMLS("volume-footprint-get-iter") + request.NewChildS("max-records", collectors.DefaultBatchSize) + desired := node.NewXMLS("desired-attributes") + footprintInfo := node.NewXMLS("footprint-info") + footprintInfo.NewChildS("volume", "") + footprintInfo.NewChildS("vserver", "") + footprintInfo.NewChildS("volume-blocks-footprint-bin0", "") + footprintInfo.NewChildS("volume-blocks-footprint-bin0-percent", "") + footprintInfo.NewChildS("volume-blocks-footprint-bin1", "") + footprintInfo.NewChildS("volume-blocks-footprint-bin1-percent", "") + desired.AddChild(footprintInfo) + request.AddChild(desired) + + if result, err = v.client.InvokeZapiCall(request); err != nil { + return nil, err + } + + if len(result) == 0 { + return volumeFootprintMap, nil + } + + for _, footprint := range result { + footprintMetrics := make(map[string]string) + volume := footprint.GetChildContentS("volume") + svm := footprint.GetChildContentS("vserver") + performanceTierFootprint := footprint.GetChildContentS("volume-blocks-footprint-bin0") + performanceTierFootprintPerc := footprint.GetChildContentS("volume-blocks-footprint-bin0-percent") + capacityTierFootprint := footprint.GetChildContentS("volume-blocks-footprint-bin1") + capacityTierFootprintPerc := footprint.GetChildContentS("volume-blocks-footprint-bin1-percent") + footprintMetrics["performance_tier_footprint"] = performanceTierFootprint + footprintMetrics["performance_tier_footprint_percent"] = performanceTierFootprintPerc +
footprintMetrics["capacity_tier_footprint"] = capacityTierFootprint + footprintMetrics["capacity_tier_footprint_percent"] = capacityTierFootprintPerc + volumeFootprintMap[volume+svm] = footprintMetrics + } + + return volumeFootprintMap, nil +} + func (v *Volume) getEncryptedDisks() ([]string, error) { var ( result []*node.Node diff --git a/cmd/collectors/zapiperf/plugins/disk/disk.go b/cmd/collectors/zapiperf/plugins/disk/disk.go index 7e4f27b39..1d025d816 100644 --- a/cmd/collectors/zapiperf/plugins/disk/disk.go +++ b/cmd/collectors/zapiperf/plugins/disk/disk.go @@ -5,7 +5,6 @@ import ( "github.com/netapp/harvest/v2/cmd/poller/plugin" "github.com/netapp/harvest/v2/pkg/api/ontapi/zapi" "github.com/netapp/harvest/v2/pkg/conf" - "github.com/netapp/harvest/v2/pkg/dict" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/matrix" "github.com/netapp/harvest/v2/pkg/tree/node" @@ -45,7 +44,7 @@ type Disk struct { shelfData map[string]*matrix.Matrix powerData map[string]*matrix.Matrix instanceKeys map[string]string - instanceLabels map[string]*dict.Dict + instanceLabels map[string]map[string]string batchSize string client *zapi.Client query string @@ -66,7 +65,6 @@ type aggregate struct { isShared bool power float64 derivedType RaidAggrDerivedType - export bool } type disk struct { @@ -131,7 +129,7 @@ func (d *Disk) Init() error { d.powerData = make(map[string]*matrix.Matrix) d.instanceKeys = make(map[string]string) - d.instanceLabels = make(map[string]*dict.Dict) + d.instanceLabels = make(map[string]map[string]string) objects := d.Params.GetChildS("objects") if objects == nil { @@ -148,7 +146,7 @@ func (d *Disk) Init() error { objectName = strings.TrimSpace(x[1]) } - d.instanceLabels[attribute] = dict.New() + d.instanceLabels[attribute] = make(map[string]string) d.shelfData[attribute] = matrix.New(d.Parent+".Shelf", "shelf_"+objectName, "shelf_"+objectName) d.shelfData[attribute].SetGlobalLabel("datacenter", d.ParentParams.GetChildContentS("datacenter")) @@ -171,11 +169,11 @@ func (d *Disk) Init() error { switch kind { case "key": d.instanceKeys[attribute] = metricName - d.instanceLabels[attribute].Set(metricName, display) + d.instanceLabels[attribute][metricName] = display instanceKeys.NewChildS("", display) d.Logger.Debug().Msgf("added instance key: (%s) (%s) [%s]", attribute, x.GetNameS(), display) case "label": - d.instanceLabels[attribute].Set(metricName, display) + d.instanceLabels[attribute][metricName] = display instanceLabels.NewChildS("", display) d.Logger.Debug().Msgf("added instance label: (%s) (%s) [%s]", attribute, x.GetNameS(), display) case "float": @@ -313,7 +311,6 @@ func (d *Disk) calculateAggrPower(data *matrix.Matrix, output []*matrix.Matrix) if totalTransfers == nil { return output, errs.New(errs.ErrNoMetric, "total_transfers") } - totaliops := make(map[string]float64) // calculate power for returned disks in zapiperf response for _, instance := range data.GetInstances() { @@ -335,9 +332,7 @@ func (d *Disk) calculateAggrPower(data *matrix.Matrix, output []*matrix.Matrix) sh, ok := d.ShelfMap[shelfID] if ok { diskPower := v * sh.power / sh.iops - totaliops[shelfID] = totaliops[shelfID] + v - aggrPower := a.power + diskPower - a.power = aggrPower + a.power += diskPower } } else { d.Logger.Warn().Str("diskUUID", diskUUID).Msg("Missing disk info") @@ -385,23 +380,21 @@ func (d *Disk) calculateAggrPower(data *matrix.Matrix, output []*matrix.Matrix) // fill aggr power matrix with power calculated above for k, v := range d.aggrMap { - if v.export { - 
instanceKey := k - instance, err := aggrData.NewInstance(instanceKey) - if err != nil { - d.Logger.Error().Err(err).Str("key", instanceKey).Msg("Failed to add instance") - continue - } - instance.SetLabel("aggr", k) - instance.SetLabel("derivedType", string(v.derivedType)) - instance.SetLabel("node", v.node) - - m := aggrData.GetMetric("power") - err = m.SetValueFloat64(instance, v.power) - if err != nil { - d.Logger.Error().Err(err).Str("key", instanceKey).Msg("Failed to set value") - continue - } + instanceKey := k + instance, err := aggrData.NewInstance(instanceKey) + if err != nil { + d.Logger.Error().Err(err).Str("key", instanceKey).Msg("Failed to add instance") + continue + } + instance.SetLabel("aggr", k) + instance.SetLabel("derivedType", string(v.derivedType)) + instance.SetLabel("node", v.node) + + m := aggrData.GetMetric("power") + err = m.SetValueFloat64(instance, v.power) + if err != nil { + d.Logger.Error().Err(err).Str("key", instanceKey).Msg("Failed to set value") + continue } } output = append(output, aggrData) @@ -553,7 +546,6 @@ func (d *Disk) getAggregates() error { aggrRaidAttributes.NewChildS("uses-shared-disks", "") aggrRaidAttributes.NewChildS("aggregate-type", "") aggrRaidAttributes.NewChildS("is-composite", "") - aggrRaidAttributes.NewChildS("is-root-aggregate", "") aggrAttributes.AddChild(aggrRaidAttributes) aggrAttributes.AddChild(aggrOwnerAttributes) desired.AddChild(aggrAttributes) @@ -584,21 +576,17 @@ func (d *Disk) getAggregates() error { nodeName = aggrOwnerAttr.GetChildContentS("home-name") } if aggrRaidAttr != nil { - isR := aggrRaidAttr.GetChildContentS("is-root-aggregate") - usesSharedDisks := aggrRaidAttr.GetChildContentS("uses-shared-disks") aggregateType := aggrRaidAttr.GetChildContentS("aggregate-type") isC := aggrRaidAttr.GetChildContentS("is-composite") isComposite := isC == "true" isShared := usesSharedDisks == "true" - isRootAggregate := isR == "true" derivedType := getAggregateDerivedType(aggregateType, isComposite, isShared) d.aggrMap[aggrName] = &aggregate{ name: aggrName, isShared: isShared, derivedType: derivedType, node: nodeName, - export: !isRootAggregate, } } } @@ -854,7 +842,7 @@ func (d *Disk) handleCMode(shelves []*node.Node) ([]*matrix.Matrix, error) { } d.Logger.Debug().Msgf("add (%s) instance: %s.%s", attribute, shelfID, key) - for label, labelDisplay := range d.instanceLabels[attribute].Map() { + for label, labelDisplay := range d.instanceLabels[attribute] { if value := obj.GetChildContentS(label); value != "" { instance.SetLabel(labelDisplay, value) } diff --git a/cmd/collectors/zapiperf/plugins/externalserviceoperation/externalserviceoperation.go b/cmd/collectors/zapiperf/plugins/externalserviceoperation/externalserviceoperation.go index 219602607..9294b93b9 100644 --- a/cmd/collectors/zapiperf/plugins/externalserviceoperation/externalserviceoperation.go +++ b/cmd/collectors/zapiperf/plugins/externalserviceoperation/externalserviceoperation.go @@ -19,7 +19,7 @@ func New(p *plugin.AbstractPlugin) plugin.Plugin { func (e *ExternalServiceOperation) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) { data := dataMap[e.Object] - datacenterClusterKey := data.GetGlobalLabels().Get("datacenter") + Hyphen + data.GetGlobalLabels().Get("cluster") + Hyphen + datacenterClusterKey := data.GetGlobalLabels()["datacenter"] + Hyphen + data.GetGlobalLabels()["cluster"] + Hyphen for _, instance := range data.GetInstances() { if !instance.IsExportable() { continue diff --git a/cmd/collectors/zapiperf/plugins/volume/volume.go 
b/cmd/collectors/zapiperf/plugins/volume/volume.go index 6e8fd319a..55ad48c6f 100644 --- a/cmd/collectors/zapiperf/plugins/volume/volume.go +++ b/cmd/collectors/zapiperf/plugins/volume/volume.go @@ -8,6 +8,7 @@ import ( "github.com/netapp/harvest/v2/cmd/poller/plugin" "github.com/netapp/harvest/v2/pkg/matrix" "github.com/netapp/harvest/v2/pkg/set" + "maps" "regexp" "sort" "strings" @@ -37,8 +38,8 @@ func (v *Volume) Init() error { return nil } -//@TODO cleanup logging -//@TODO rewrite using vector arithmetic +// @TODO cleanup logging +// @TODO rewrite using vector arithmetic // will simplify the code a whole!!! func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) { @@ -52,6 +53,7 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error opsKeyPrefix := "temp_" re := regexp.MustCompile(`^(.*)__(\d{4})$`) + fgAggrMap := make(map[string]*set.Set) flexgroupAggrsMap := make(map[string]*set.Set) // volume_aggr_labels metric is deprecated now and will be removed later. metricName := "labels" @@ -78,17 +80,17 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error key := i.GetLabel("svm") + "." + match[1] if cache.GetInstance(key) == nil { fg, _ := cache.NewInstance(key) - fg.SetLabels(i.GetLabels().Copy()) + fg.SetLabels(maps.Clone(i.GetLabels())) fg.SetLabel("volume", match[1]) - // Flexgroup don't show any aggregate, node - fg.SetLabel("aggr", "") + // Flexgroups don't show any node fg.SetLabel("node", "") fg.SetLabel(style, "flexgroup") + fgAggrMap[key] = set.New() } if volumeAggrmetric.GetInstance(key) == nil { flexgroupInstance, _ := volumeAggrmetric.NewInstance(key) - flexgroupInstance.SetLabels(i.GetLabels().Copy()) + flexgroupInstance.SetLabels(maps.Clone(i.GetLabels())) flexgroupInstance.SetLabel("volume", match[1]) // Flexgroup don't show any node flexgroupInstance.SetLabel("node", "") @@ -98,6 +100,7 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error v.Logger.Error().Err(err).Str("metric", metricName).Msg("Unable to set value on metric") } } + fgAggrMap[key].Add(i.GetLabel("aggr")) flexgroupAggrsMap[key].Add(i.GetLabel("aggr")) i.SetLabel(style, "flexgroup_constituent") i.SetExportable(false) @@ -109,7 +112,7 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error v.Logger.Error().Err(err).Str("key", key).Msg("Failed to create new instance") continue } - flexvolInstance.SetLabels(i.GetLabels().Copy()) + flexvolInstance.SetLabels(maps.Clone(i.GetLabels())) flexvolInstance.SetLabel(style, "flexvol") if err := metric.SetValueFloat64(flexvolInstance, 1); err != nil { v.Logger.Error().Err(err).Str("metric", metricName).Msg("Unable to set value on metric") @@ -120,7 +123,7 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error v.Logger.Debug().Msgf("extracted %d flexgroup volumes", len(cache.GetInstances())) - //cache.Reset() + // cache.Reset() // create summary for _, i := range data.GetInstances() { @@ -143,6 +146,11 @@ func (v *Volume) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error continue } + // set the aggrs label for the flexgroup; keep the aggregate order the same for each poll + aggrs := fgAggrMap[key].Values() + sort.Strings(aggrs) + fg.SetLabel("aggr", strings.Join(aggrs, ",")) + for mkey, m := range data.GetMetrics() { if !m.IsExportable() && m.GetType() != "float64" { diff --git a/cmd/collectors/zapiperf/zapiperf.go b/cmd/collectors/zapiperf/zapiperf.go index fa76e749c..07ee1e0ae 100644 ---
a/cmd/collectors/zapiperf/zapiperf.go +++ b/cmd/collectors/zapiperf/zapiperf.go @@ -43,6 +43,7 @@ import ( "github.com/netapp/harvest/v2/pkg/matrix" "github.com/netapp/harvest/v2/pkg/set" "github.com/netapp/harvest/v2/pkg/tree/node" + "github.com/rs/zerolog" "strconv" "strings" "time" @@ -907,18 +908,18 @@ func (z *ZapiPerf) PollCounter() (map[string]*matrix.Matrix, error) { err error request, response, counterList *node.Node oldMetrics, oldLabels, replaced, missing *set.Set - wanted *dict.Dict + wanted map[string]string oldMetricsSize, oldLabelsSize int counters map[string]*node.Node ) z.scalarCounters = make([]string, 0) counters = make(map[string]*node.Node) - oldMetrics = set.New() // current set of metrics, so we can remove from matrix if not updated - oldLabels = set.New() // current set of labels - wanted = dict.New() // counters listed in template, maps raw name to display name - missing = set.New() // required base counters, missing in template - replaced = set.New() // deprecated and replaced counters + oldMetrics = set.New() // current set of metrics, so we can remove from matrix if not updated + oldLabels = set.New() // current set of labels + wanted = make(map[string]string) // counters listed in template, maps raw name to display name + missing = set.New() // required base counters, missing in template + replaced = set.New() // deprecated and replaced counters mat := z.Matrix[z.Object] for key := range mat.GetMetrics() { @@ -935,16 +936,16 @@ func (z *ZapiPerf) PollCounter() (map[string]*matrix.Matrix, error) { if counterList = z.Params.GetChildS("counters"); counterList != nil { for _, cnt := range counterList.GetAllChildContentS() { if renamed := strings.Split(cnt, "=>"); len(renamed) == 2 { - wanted.Set(strings.TrimSpace(renamed[0]), strings.TrimSpace(renamed[1])) + wanted[strings.TrimSpace(renamed[0])] = strings.TrimSpace(renamed[1]) } else if cnt == "instance_name" { - wanted.Set("instance_name", z.object) + wanted["instance_name"] = z.object } else { display := strings.ReplaceAll(cnt, "-", "_") if strings.HasPrefix(display, z.object) { display = strings.TrimPrefix(display, z.object) display = strings.TrimPrefix(display, "_") } - wanted.Set(cnt, display) + wanted[cnt] = display } } } else { @@ -987,7 +988,7 @@ func (z *ZapiPerf) PollCounter() (map[string]*matrix.Matrix, error) { counter.SetChildContentS("properties", p) } - display, ok := wanted.GetHas(key) + display, ok := wanted[key] // counter not requested if !ok { z.Logger.Trace(). @@ -1004,7 +1005,8 @@ func (z *ZapiPerf) PollCounter() (map[string]*matrix.Matrix, error) { Str("key", key). Str("replacement", r). 
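PollCounter above drops the dict wrapper for a plain wanted map: the comma-ok idiom replaces Has/GetHas, and the template's "raw => display" rename syntax populates it. A condensed sketch of that loop:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Template counters, including the "raw => display" rename syntax.
	counters := []string{"total_ops", "read_latency => latency"}

	wanted := make(map[string]string)
	for _, cnt := range counters {
		if renamed := strings.Split(cnt, "=>"); len(renamed) == 2 {
			wanted[strings.TrimSpace(renamed[0])] = strings.TrimSpace(renamed[1])
		} else {
			wanted[cnt] = strings.ReplaceAll(cnt, "-", "_")
		}
	}

	if display, ok := wanted["read_latency"]; ok { // comma-ok replaces dict.GetHas
		fmt.Println(display) // latency
	}
}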
Msg("Replaced deprecated counter") - if !wanted.Has(r) { + _, ok = wanted[r] + if !ok { replaced.Add(r) } } @@ -1018,9 +1020,12 @@ func (z *ZapiPerf) PollCounter() (map[string]*matrix.Matrix, error) { } else { // add counter as numeric metric oldMetrics.Remove(key) - if r := z.addCounter(counter, key, display, true, counters); r != "" && !wanted.Has(r) { - missing.Add(r) // required base counter, missing in template - z.Logger.Trace().Msgf("%smarking [%s] as required base counter for [%s]%s", color.Red, r, key, color.End) + if r := z.addCounter(counter, key, display, true, counters); r != "" { + _, ok = wanted[r] + if !ok { + missing.Add(r) // required base counter, missing in template + z.Logger.Trace().Msgf("%smarking [%s] as required base counter for [%s]%s", color.Red, r, key, color.End) + } } } } @@ -1032,9 +1037,12 @@ func (z *ZapiPerf) PollCounter() (map[string]*matrix.Matrix, error) { if replaced.Has(name) { oldMetrics.Remove(name) z.Logger.Debug().Msgf("adding [%s] (replacement for deprecated counter)", name) - if r := z.addCounter(counter, name, name, true, counters); r != "" && !wanted.Has(r) { - missing.Add(r) // required base counter, missing in template - z.Logger.Debug().Msgf("%smarking [%s] as required base counter for [%s]%s", color.Red, r, name, color.End) + if r := z.addCounter(counter, name, name, true, counters); r != "" { + _, ok := wanted[r] + if !ok { + missing.Add(r) // required base counter, missing in template + z.Logger.Debug().Msgf("%smarking [%s] as required base counter for [%s]%s", color.Red, r, name, color.End) + } } } } @@ -1507,7 +1515,13 @@ func (z *ZapiPerf) updateQosLabels(qos *node.Node, instance *matrix.Instance, ke instance.SetLabel(display, value) } } - z.Logger.Debug().Str("query", z.Query).Str("key", key).Str("qos labels", instance.GetLabels().String()).Send() + if z.Logger.GetLevel() == zerolog.DebugLevel { + z.Logger.Debug(). + Str("query", z.Query). + Str("key", key). + Str("qos labels", dict.String(instance.GetLabels())). 
+ Send() + } } } diff --git a/cmd/collectors/zapiperf/zapiperf_test.go b/cmd/collectors/zapiperf/zapiperf_test.go index f4f8efbf9..9187c4a84 100644 --- a/cmd/collectors/zapiperf/zapiperf_test.go +++ b/cmd/collectors/zapiperf/zapiperf_test.go @@ -36,16 +36,13 @@ func Test_ZapiPerf(t *testing.T) { func NewZapiPerf(object, path string) *ZapiPerf { // homepath is harvest directory level homePath := "../../../" - zapiperfPoller := "testZapiperf" - conf.TestLoadHarvestConfig("testdata/config.yml") - opts := options.Options{ - Poller: zapiperfPoller, - HomePath: homePath, - IsTest: true, - } + opts := options.New(options.WithConfPath(homePath + "/conf")) + opts.Poller = "testZapiperf" + opts.HomePath = homePath + opts.IsTest = true - ac := collector.New("Zapiperf", object, &opts, params(object, path), nil) + ac := collector.New("Zapiperf", object, opts, params(object, path), nil) z := &ZapiPerf{} if err := z.Init(ac); err != nil { log.Fatal().Err(err).Send() diff --git a/cmd/exporters/influxdb/influxdb.go b/cmd/exporters/influxdb/influxdb.go index 137261d75..c8d5bf3d8 100644 --- a/cmd/exporters/influxdb/influxdb.go +++ b/cmd/exporters/influxdb/influxdb.go @@ -253,7 +253,7 @@ func (e *InfluxDB) Render(data *matrix.Matrix) ([][]byte, error) { // only to store global labels that we'll // add to all instances global := NewMeasurement("", 0) - for key, value := range data.GetGlobalLabels().Map() { + for key, value := range data.GetGlobalLabels() { global.AddTag(key, value) } @@ -271,14 +271,14 @@ func (e *InfluxDB) Render(data *matrix.Matrix) ([][]byte, error) { // tag set if includeAll { - for label, value := range instance.GetLabels().Map() { + for label, value := range instance.GetLabels() { if value != "" { m.AddTag(label, value) } } } else { for _, key := range keysToInclude { - if value, has := instance.GetLabels().GetHas(key); has && value != "" { + if value, has := instance.GetLabels()[key]; has && value != "" { m.AddTag(key, value) } } @@ -286,14 +286,14 @@ func (e *InfluxDB) Render(data *matrix.Matrix) ([][]byte, error) { // skip instance without key tags if len(m.tagSet) == 0 { - e.Logger.Debug().Msgf("skip instance (%s), no tag set parsed from labels (%v)", key, instance.GetLabels().Map()) + e.Logger.Debug().Msgf("skip instance (%s), no tag set parsed from labels (%v)", key, instance.GetLabels()) } // field set // strings for _, label := range labelsToInclude { - if value, has := instance.GetLabels().GetHas(label); has && value != "" { + if value, has := instance.GetLabels()[label]; has && value != "" { if value == "true" || value == "false" { m.AddField(label, value) } else { @@ -320,7 +320,7 @@ func (e *InfluxDB) Render(data *matrix.Matrix) ([][]byte, error) { fieldName := metric.GetName() if metric.HasLabels() { - for _, label := range metric.GetLabels().Map() { + for _, label := range metric.GetLabels() { fieldName += "_" + label } } diff --git a/cmd/exporters/influxdb/influxdb_test.go b/cmd/exporters/influxdb/influxdb_test.go index f852e810a..9be1e48f8 100644 --- a/cmd/exporters/influxdb/influxdb_test.go +++ b/cmd/exporters/influxdb/influxdb_test.go @@ -12,10 +12,10 @@ import ( ) func setupInfluxDB(t *testing.T, exporterName string) *InfluxDB { - opts := &options.Options{} + opts := options.New() opts.Debug = true - err := conf.LoadHarvestConfig("../../tools/doctor/testdata/testConfig.yml") + _, err := conf.LoadHarvestConfig("../../tools/doctor/testdata/testConfig.yml") if err != nil { panic(err) } diff --git a/cmd/exporters/prometheus/prometheus.go 
b/cmd/exporters/prometheus/prometheus.go index e09561ebc..9ec28fc33 100644 --- a/cmd/exporters/prometheus/prometheus.go +++ b/cmd/exporters/prometheus/prometheus.go @@ -37,8 +37,8 @@ import ( // Default parameters const ( - // maximum amount of time we will keep metrics in cache - cacheMaxKeep = "300s" + // the maximum amount of time to keep metrics in the cache + cacheMaxKeep = "5m" // apply a prefix to metrics globally (default none) globalPrefix = "" ) @@ -172,8 +172,8 @@ func (p *Prometheus) Init() error { } // The optional parameter LocalHTTPAddr is the address of the HTTP service, valid values are: - //- "localhost" or "127.0.0.1", this limits access to local machine - //- "" (default) or "0.0.0.0", allows access from network + // - "localhost" or "127.0.0.1", this limits access to local machine + // - "" (default) or "0.0.0.0", allows access from network addr := p.Params.LocalHTTPAddr if addr != "" { p.Logger.Debug().Str("addr", addr).Msg("Using custom local addr") @@ -317,7 +317,7 @@ func (p *Prometheus) render(data *matrix.Matrix) [][]byte { prefix = p.globalPrefix + data.Object - for key, value := range data.GetGlobalLabels().Map() { + for key, value := range data.GetGlobalLabels() { globalLabels = append(globalLabels, escape(replacer, key, value)) } @@ -337,12 +337,13 @@ func (p *Prometheus) render(data *matrix.Matrix) [][]byte { instanceLabelsSet := make(map[string]struct{}) if includeAllLabels { - for label, value := range instance.GetLabels().Map() { + for label, value := range instance.GetLabels() { // temporary fix for the rarely happening duplicate labels // known case is: ZapiPerf -> 7mode -> disk.yaml // actual cause is the Aggregator plugin, which is adding node as // instance label (even though it's already a global label for 7modes) - if !data.GetGlobalLabels().Has(label) { + _, ok := data.GetGlobalLabels()[label] + if !ok { instanceKeys = append(instanceKeys, escape(replacer, label, value)) //nolint:makezero } } @@ -442,8 +443,8 @@ func (p *Prometheus) render(data *matrix.Matrix) [][]byte { histogram.values[index] = value continue } - metricLabels := make([]string, 0, metric.GetLabels().Size()) - for k, v := range metric.GetLabels().Map() { + metricLabels := make([]string, 0, len(metric.GetLabels())) + for k, v := range metric.GetLabels() { metricLabels = append(metricLabels, escape(replacer, k, v)) } x := fmt.Sprintf( diff --git a/cmd/harvest/harvest.go b/cmd/harvest/harvest.go index 2da17067d..c0bf18911 100644 --- a/cmd/harvest/harvest.go +++ b/cmd/harvest/harvest.go @@ -35,7 +35,7 @@ import ( _ "net/http/pprof" // #nosec since pprof is off by default "os" "os/exec" - "path" + "path/filepath" "strconv" "strings" "syscall" @@ -55,6 +55,7 @@ type options struct { loglevel int logToFile bool // only used when running in foreground config string + confPath string profiling bool longStatus bool daemon bool @@ -100,7 +101,8 @@ func doManageCmd(cmd *cobra.Command, args []string) { // cmd.DebugFlags() // uncomment to print flags - if err = conf.LoadHarvestConfig(opts.config); err != nil { + _, err = conf.LoadHarvestConfig(opts.config) + if err != nil { if os.IsNotExist(err) { log.Fatalf("config [%s]: not found\n", opts.config) } @@ -358,7 +360,7 @@ func stopPoller(ps *util.PollerStatus) { func startPoller(pollerName string, promPort int, opts *options) { argv := []string{ - path.Join(HarvestHomePath, "bin", "poller"), + filepath.Join(HarvestHomePath, "bin", "poller"), "--poller", pollerName, "--loglevel", @@ -366,42 +368,40 @@ func startPoller(pollerName string, promPort 
int, opts *options) { } if promPort != 0 { - argv = append(argv, "--promPort") - argv = append(argv, strconv.Itoa(promPort)) + argv = append(argv, "--promPort", strconv.Itoa(promPort)) } if opts.debug { argv = append(argv, "--debug") } if opts.config != HarvestConfigPath { - argv = append(argv, "--config") - argv = append(argv, opts.config) + argv = append(argv, "--config", opts.config) + } + + if opts.confPath != conf.DefaultConfPath { + argv = append(argv, "--confpath", opts.confPath) } if opts.profiling { if opts.foreground { // Always pick the same port when profiling in foreground - argv = append(argv, "--profiling") - argv = append(argv, "6060") + argv = append(argv, "--profiling", "6060") } else { if port, err := freePort(); err != nil { // No free port, log it and move on fmt.Println("profiling disabled due to no free ports") } else { - argv = append(argv, "--profiling") - argv = append(argv, strconv.Itoa(port)) + argv = append(argv, "--profiling", strconv.Itoa(port)) } } } if len(opts.collectors) > 0 { - argv = append(argv, "--collectors") - argv = append(argv, strings.Join(opts.collectors, ",")) + argv = append(argv, "--collectors", strings.Join(opts.collectors, ",")) } if len(opts.objects) > 0 { - argv = append(argv, "--objects") - argv = append(argv, strings.Join(opts.objects, ",")) + argv = append(argv, "--objects", strings.Join(opts.objects, ",")) } if opts.foreground { @@ -545,6 +545,8 @@ func init() { rootCmd.AddCommand(admin.Cmd()) rootCmd.PersistentFlags().StringVar(&opts.config, "config", "./harvest.yml", "Harvest config file path") + rootCmd.PersistentFlags().StringVar(&opts.confPath, "confpath", "conf", "colon-separated paths to search for Harvest templates") + rootCmd.Version = version.String() rootCmd.SetVersionTemplate(version.String()) rootCmd.SetUsageTemplate(rootCmd.UsageTemplate() + ` diff --git a/cmd/poller/collector/asup.go b/cmd/poller/collector/asup.go index 9f55aa954..13128c96c 100644 --- a/cmd/poller/collector/asup.go +++ b/cmd/poller/collector/asup.go @@ -112,6 +112,7 @@ type AsupCollector struct { Schedules []Schedule Exporters []string Counters Counters + InstanceInfo *InstanceInfo `json:"InstanceInfo,omitempty"` } const ( diff --git a/cmd/poller/collector/helpers.go b/cmd/poller/collector/helpers.go index ca7cdf5a0..042113dd9 100644 --- a/cmd/poller/collector/helpers.go +++ b/cmd/poller/collector/helpers.go @@ -3,12 +3,6 @@ Copyright NetApp Inc, 2021 All rights reserved This file contains helper functions and methods for Poller, AbstractCollector and collectors - -@TODO: review which functions really belong here -@TODO: review which methods should actually be functions - - (e.g. ImportSubTemplate is not abstract enough to be a method - of AbstractCollector) */ package collector @@ -23,150 +17,164 @@ import ( "github.com/netapp/harvest/v2/cmd/poller/plugin/labelagent" "github.com/netapp/harvest/v2/cmd/poller/plugin/max" "github.com/netapp/harvest/v2/cmd/poller/plugin/metricagent" + "github.com/netapp/harvest/v2/pkg/conf" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/tree" "github.com/netapp/harvest/v2/pkg/tree/node" "os" - "path" + "path/filepath" "regexp" "sort" "strings" ) -// ImportTemplate retrieves the config (template) of a collector, arguments are: -// @confDir - path of Harvest config directory (usually /etc/harvest) -// @confFn - template filename (e.g.
default.yaml or custom.yaml) -// @collectorName - name of the collector -func ImportTemplate(confPath, confFn, collectorName string) (*node.Node, error) { - fp := path.Join(confPath, "conf/", strings.ToLower(collectorName), confFn) - return tree.ImportYaml(fp) +// ImportTemplate looks for a collector's template by searching confPaths for the first template that exists in +// confPath/collectorName/templateName +func ImportTemplate(confPaths []string, templateName, collectorName string) (*node.Node, error) { + homePath := conf.Path() + for _, confPath := range confPaths { + fp := filepath.Join(homePath, confPath, strings.ToLower(collectorName), templateName) + _, err := os.Stat(fp) + if errors.Is(err, os.ErrNotExist) { + continue + } + return tree.ImportYaml(fp) + } + return nil, fmt.Errorf("template not found on confPath") } var versionRegex = regexp.MustCompile(`\d+\.\d+\.\d+`) // ImportSubTemplate retrieves the best matching subtemplate of a collector object. -// -// This method is only applicable to the Zapi/ZapiPerf collectors which have -// multiple objects and each object is forked as a separate collector. -// The subtemplates are sorted in subdirectories that serve as "tag" for the -// matching ONTAP version. ImportSubTemplate will attempt to choose the subtemplate -// with the closest matching ONTAP version. -// -// Arguments: -// @model - ONTAP model, either cdot or 7mode -// @filename - name of the subtemplate -// @version - ONTAP version triple (generation, major, minor) +// This method is applicable to collectors which have multiple objects. +// Each object is forked as a separate collector. +// The sub-templates exist in subdirectories named after ONTAP versions. These directories +// are sorted, and we try to return the subtemplate that most closely matches the ONTAP version. 
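For reference, the rewritten ImportTemplate is a first-match-wins search over the confpath entries. A simplified, runnable sketch of the same lookup (home directory, collector, and template names are illustrative):

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// findTemplate sketches the search order ImportTemplate implements above:
// walk the confPath entries in order and return the first
// confPath/collector/template that exists on disk.
func findTemplate(homePath string, confPaths []string, collector, template string) (string, error) {
	for _, confPath := range confPaths {
		fp := filepath.Join(homePath, confPath, strings.ToLower(collector), template)
		if _, err := os.Stat(fp); errors.Is(err, os.ErrNotExist) {
			continue
		}
		return fp, nil
	}
	return "", fmt.Errorf("template not found on confPath")
}

func main() {
	// "local" templates win over the stock "conf" directory because they
	// appear first in the colon-separated list.
	paths := filepath.SplitList("local:conf")
	fp, err := findTemplate("/opt/harvest", paths, "Zapi", "default.yaml")
	fmt.Println(fp, err)
}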
+// Model is cdot or 7mode, filename is the name of the subtemplate, and ver is the +// ONTAP version triple (generation, major, minor) func (c *AbstractCollector) ImportSubTemplate(model, filename string, ver [3]int) (*node.Node, string, error) { var ( - selectedVersion, pathPrefix, templatePath string - availableVersions []string - err error - customTemplateErr error - finalTemplate *node.Node - customTemplate *node.Node + selectedVersion, templatePath string + customTemplateErr error + finalTemplate *node.Node + customTemplate *node.Node ) - //split filename by comma - // in case of custom.yaml having same key, file names will be concatenated by comma + // Filename will be the name of a template (volume.yaml) or, when merging templates, a comma-separated + // string like "volume.yaml,custom_volume.yaml" filenames := strings.Split(filename, ",") + verWithDots := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(ver)), "."), "[]") + ontapVersion, err := version.NewVersion(verWithDots) + if err != nil { + return nil, "", fmt.Errorf("no best-fit template found due to err=%w", err) + } + homePath := conf.Path() + +nextFile: for _, f := range filenames { - pathPrefix = c.GetTemplatePathPrefix(model) - c.Logger.Debug().Msgf("Looking for best-fitting template in [%s]", pathPrefix) - verWithDots := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(ver)), "."), "[]") - - // check for available versions, those are the subdirectories that include filename - if files, err := os.ReadDir(pathPrefix); err == nil { - for _, file := range files { - if !file.IsDir() { - continue - } - submatch := versionRegex.FindStringSubmatch(file.Name()) - if len(submatch) > 0 { - if templates, err := os.ReadDir(path.Join(pathPrefix, file.Name())); err == nil { - for _, t := range templates { - if t.Name() == f { - c.Logger.Trace().Msgf("available version dir: [%s]", file.Name()) - availableVersions = append(availableVersions, file.Name()) - break - } - } - } - } + for _, confPath := range c.Options.ConfPaths { + selectedVersion, err = c.findBestFit(homePath, confPath, f, model, ontapVersion) + if err != nil || selectedVersion == "" { + continue } - } else { - return nil, "", err - } - c.Logger.Trace().Msgf("checking for %d available versions: %v", len(availableVersions), availableVersions) - - if len(availableVersions) > 0 { - versions := make([]*version.Version, len(availableVersions)) - for i, raw := range availableVersions { - v, err := version.NewVersion(raw) - if err != nil { - c.Logger.Trace().Msgf("error parsing version: %s err: %s", raw, err) + + templatePath = filepath.Join(selectedVersion, f) + c.Logger.Info().Str("path", templatePath).Str("v", verWithDots).Msg("best-fit template") + if finalTemplate == nil { + finalTemplate, err = tree.ImportYaml(templatePath) + if err == nil { + finalTemplate.PreprocessTemplate() + continue nextFile + } + } else { + // any errors w.r.t customTemplate are warnings and should not be returned to caller + customTemplate, customTemplateErr = tree.ImportYaml(templatePath) + if customTemplateErr != nil { + c.Logger.Warn().Err(customTemplateErr).Str("path", templatePath). + Msg("Unable to import template file.
File is invalid or empty") continue } - versions[i] = v - } - - sort.Sort(version.Collection(versions)) - - verS, err := version.NewVersion(verWithDots) - if err != nil { - c.Logger.Trace().Msgf("error parsing ONTAP version: %s err: %s", verWithDots, err) - return nil, "", errors.New("no best-fitting subtemplate version found") - } - // get closest index - idx := getClosestIndex(versions, verS) - if idx >= 0 && idx < len(versions) { - selectedVersion = versions[idx].String() + if customTemplateErr == nil { + customTemplate.PreprocessTemplate() + finalTemplate.Merge(customTemplate, nil) + continue nextFile + } } } - if selectedVersion == "" { + if finalTemplate == nil { // workaround for 7mode template that will always be missing in cdot if c.Object == "Status_7mode" && model == "cdot" { return nil, "", errs.New(errs.ErrWrongTemplate, "unable to load status_7.yaml on cdot") } - return nil, "", errors.New("no best-fit template found") + return nil, "", fmt.Errorf("no best-fit template for %s on %s", filename, c.Options.ConfPath) } + } - templatePath = path.Join(pathPrefix, selectedVersion, f) - c.Logger.Info(). - Str("path", templatePath). - Str("v", verWithDots). - Msg("best-fit template") - if finalTemplate == nil { - finalTemplate, err = tree.ImportYaml(templatePath) - if err == nil { - finalTemplate.PreprocessTemplate() - } - } else { - // any errors w.r.t customTemplate are warnings and should not be returned to caller - customTemplate, customTemplateErr = tree.ImportYaml(templatePath) - if customTemplate == nil || customTemplateErr != nil { - c.Logger.Warn().Err(err).Str("template", templatePath). - Msg("Unable to import template file. File is invalid or empty") - continue - } - if customTemplateErr == nil { - customTemplate.PreprocessTemplate() - // merge templates - finalTemplate.Merge(customTemplate, nil) + return finalTemplate, templatePath, err +} + +func (c *AbstractCollector) findBestFit(homePath string, confPath string, name string, model string, ontapVersion *version.Version) (string, error) { + var ( + selectedVersion string + availableVersions []string + ) + + pathPrefix := filepath.Join(homePath, confPath, strings.ToLower(c.Name), model) + c.Logger.Debug().Str("pathPrefix", pathPrefix).Msg("Looking for best-fitting template in pathPrefix") + + // check for available versions, these are the subdirectories with matching filenames + files, err := os.ReadDir(pathPrefix) + if err != nil { + return "", err + } + for _, file := range files { + if !file.IsDir() { + continue + } + if versionRegex.MatchString(file.Name()) { + if templates, err := os.ReadDir(filepath.Join(pathPrefix, file.Name())); err == nil { + for _, t := range templates { + if t.Name() == name { + c.Logger.Trace().Str("dir", file.Name()).Msg("available version dir") + availableVersions = append(availableVersions, file.Name()) + break + } + } } } } - return finalTemplate, templatePath, err -} -func (c *AbstractCollector) GetTemplatePathPrefix(model string) string { - return path.Join(c.Options.HomePath, "conf/", strings.ToLower(c.Name), model) + c.Logger.Trace().Strs("availableVersions", availableVersions).Msg("checking available versions") + + if len(availableVersions) == 0 { + return "", nil + } + + versions := make([]*version.Version, len(availableVersions)) + for i, raw := range availableVersions { + v, err := version.NewVersion(raw) + if err != nil { + c.Logger.Trace().Err(err).Str("raw", raw).Msg("unable to parse version") + continue + } + versions[i] = v + } + sort.Sort(version.Collection(versions)) + + // 
get closest index + idx := getClosestIndex(versions, ontapVersion) + if idx >= 0 && idx < len(versions) { + selectedVersion = versions[idx].String() + } + + return filepath.Join(pathPrefix, selectedVersion), nil } // getClosestIndex returns the closest left match to the sorted list of input versions -// returns -1 when the versions list is empty +// returns -1 when the list of versions is empty // returns equal or closest match to the left func getClosestIndex(versions []*version.Version, version *version.Version) int { if len(versions) == 0 { @@ -198,11 +206,6 @@ func GetBuiltinPlugin(name string, abc *plugin.AbstractPlugin) plugin.Plugin { if name == "Max" { return max.New(abc) } - /* this will be added in soon - if name == "Calculator" { - return calculator.New(abc) - } - */ if name == "LabelAgent" { return labelagent.New(abc) diff --git a/cmd/poller/collector/helpers_test.go b/cmd/poller/collector/helpers_test.go index 9a8aee592..4b0a7897e 100644 --- a/cmd/poller/collector/helpers_test.go +++ b/cmd/poller/collector/helpers_test.go @@ -2,6 +2,7 @@ package collector import ( "github.com/hashicorp/go-version" + "github.com/netapp/harvest/v2/pkg/conf" "sort" "testing" ) @@ -47,3 +48,16 @@ func Test_getClosestIndex(t *testing.T) { }) } } + +func Test_HARVEST_CONF(t *testing.T) { + t.Setenv(conf.HomeEnvVar, "testdata") + template, err := ImportTemplate([]string{"conf"}, "test.yaml", "test") + if err != nil { + t.Errorf(`got err="%v", want no err`, err) + return + } + name := template.GetChildContentS("collector") + if name != "Test" { + t.Errorf("collectorName got=%s, want=Test", name) + } +} diff --git a/cmd/poller/collector/testdata/conf/test/test.yaml b/cmd/poller/collector/testdata/conf/test/test.yaml new file mode 100644 index 000000000..68a3e6ee1 --- /dev/null +++ b/cmd/poller/collector/testdata/conf/test/test.yaml @@ -0,0 +1,2 @@ + +collector: Test diff --git a/cmd/poller/options/options.go b/cmd/poller/options/options.go index d70f01ac4..d6e40598d 100644 --- a/cmd/poller/options/options.go +++ b/cmd/poller/options/options.go @@ -13,17 +13,16 @@ package options import ( - "fmt" "github.com/netapp/harvest/v2/pkg/conf" + "github.com/rs/zerolog" "os" - "strings" + "path/filepath" ) type Options struct { - Poller string // name of the Poller - Daemon bool // if true, Poller is started as daemon - Debug bool // if true, Poller is started in debug mode - // this mostly means that no data will be exported + Poller string // name of the Poller + Daemon bool // if true, Poller is started as daemon + Debug bool // if true, Poller is started in debug mode, which means data will not be exported PromPort int // HTTP port that is assigned to Poller and can be used by the Prometheus exporter Config string // filepath of Harvest config (defaults to "harvest.yml") can be relative or absolute path HomePath string // path to harvest home (usually "/opt/harvest") @@ -35,39 +34,62 @@ type Options struct { Collectors []string // name of collectors to load (override poller config) Objects []string // objects to load (overrides collector config) Profiling int // in case of profiling, the HTTP port used to display results - Asup bool // if true, invoke autosupport at start up + Asup bool // if true, invoke autosupport at start-up IsTest bool // true when run from unit test + ConfPath string // colon-separated paths to search for templates + ConfPaths []string // sliced version of `ConfPath`, list of paths to search for templates } -// String provides a string representation of Options -func (o *Options)
String() string { - x := []string{ - fmt.Sprintf("%s= %s", "Poller", o.Poller), - fmt.Sprintf("%s = %v", "Daemon", o.Daemon), - fmt.Sprintf("%s = %v", "Debug", o.Debug), - fmt.Sprintf("%s = %d", "Profiling", o.Profiling), - fmt.Sprintf("%s = %d", "PromPort", o.PromPort), - fmt.Sprintf("%s = %d", "LogLevel", o.LogLevel), - fmt.Sprintf("%s = %s", "HomePath", o.HomePath), - fmt.Sprintf("%s = %s", "LogPath", o.LogPath), - fmt.Sprintf("%s = %s", "Config", o.Config), - fmt.Sprintf("%s = %s", "Hostname", o.Hostname), - fmt.Sprintf("%s = %s", "Version", o.Version), - fmt.Sprintf("%s = %v", "Asup", o.Asup), +func New(opts ...Option) *Options { + o := &Options{} + for _, opt := range opts { + opt(o) } - return strings.Join(x, ", ") + o.SetDefaults() + return o } -// Print writes Options to STDOUT -func (o *Options) Print() { - fmt.Println(o.String()) +type Option func(*Options) + +func WithConfPath(path string) Option { + return func(o *Options) { + o.ConfPath = path + } +} + +func WithConfigPath(path string) Option { + return func(o *Options) { + o.Config = path + } } -func SetPathsAndHostname(args *Options) { +func (o *Options) MarshalZerologObject(e *zerolog.Event) { + e.Str("config", o.Config) + e.Str("confPath", o.ConfPath) + e.Bool("daemon", o.Daemon) + e.Bool("debug", o.Debug) + e.Int("profiling", o.Profiling) + e.Int("promPort", o.PromPort) + e.Str("homePath", o.HomePath) + e.Str("logPath", o.LogPath) + e.Str("hostname", o.Hostname) + e.Bool("asup", o.Asup) +} + +func (o *Options) SetDefaults() *Options { if hostname, err := os.Hostname(); err == nil { - args.Hostname = hostname + o.Hostname = hostname } - args.HomePath = conf.Path() - args.LogPath = conf.GetHarvestLogPath() + o.HomePath = conf.Path() + o.LogPath = conf.GetHarvestLogPath() + o.SetConfPath(o.ConfPath) + + return o +} + +func (o *Options) SetConfPath(colonSeparatedPath string) { + o.ConfPath = colonSeparatedPath + o.ConfPaths = filepath.SplitList(colonSeparatedPath) } diff --git a/cmd/poller/options/options_test.go b/cmd/poller/options/options_test.go index 7398d1d00..b69f4aaef 100644 --- a/cmd/poller/options/options_test.go +++ b/cmd/poller/options/options_test.go @@ -6,8 +6,7 @@ import "testing" // See https://github.com/NetApp/harvest/issues/28 func TestConfigPath(t *testing.T) { want := "foo" - options := Options{Config: want} - SetPathsAndHostname(&options) + options := New(WithConfigPath(want)) if options.Config != want { t.Fatalf(`options.Config expected=[%q], actual was=[%q]`, want, options.Config) diff --git a/cmd/poller/plugin/aggregator/aggregator.go b/cmd/poller/plugin/aggregator/aggregator.go index dcca39f6d..d8b0c105c 100644 --- a/cmd/poller/plugin/aggregator/aggregator.go +++ b/cmd/poller/plugin/aggregator/aggregator.go @@ -6,8 +6,11 @@ package aggregator import ( "github.com/netapp/harvest/v2/cmd/poller/plugin" + "github.com/netapp/harvest/v2/pkg/dict" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/matrix" + "github.com/rs/zerolog" + "golang.org/x/exp/maps" "regexp" "strings" ) @@ -150,7 +153,9 @@ func (a *Aggregator) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, e continue } - a.Logger.Trace().Msgf("handling instance with labels [%s]", instance.GetLabels().String()) + if a.Logger.GetLevel() == zerolog.TraceLevel { + a.Logger.Trace().Msgf("handling instance with labels [%s]", dict.String(instance.GetLabels())) + } for i, rule := range a.rules { @@ -172,7 +177,7 @@ func (a *Aggregator) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, e
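For reference, options.New above follows the functional-options pattern: each Option mutates the struct, then defaults fill in whatever was not set. A runnable toy version (the default values here are illustrative):

package main

import "fmt"

// Options and its functional options, reduced to a toy. The real struct has
// many more fields; these two are enough to show the shape.
type Options struct {
	Config   string
	ConfPath string
}

type Option func(*Options)

func WithConfigPath(path string) Option {
	return func(o *Options) { o.Config = path }
}

func WithConfPath(path string) Option {
	return func(o *Options) { o.ConfPath = path }
}

func New(opts ...Option) *Options {
	o := &Options{}
	for _, opt := range opts {
		opt(o)
	}
	// Defaults run after the options, mirroring the SetDefaults call above;
	// the values are illustrative.
	if o.Config == "" {
		o.Config = "harvest.yml"
	}
	if o.ConfPath == "" {
		o.ConfPath = "conf"
	}
	return o
}

func main() {
	o := New(WithConfigPath("testdata/config.yml"))
	fmt.Println(o.Config, o.ConfPath) // testdata/config.yml conf
}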
} if rule.allLabels { - objKey = strings.Join(instance.GetLabels().Values(), ".") + objKey = strings.Join(maps.Values(instance.GetLabels()), ".") } else if len(rule.includeLabels) != 0 { objKey = objName for _, k := range rule.includeLabels { diff --git a/cmd/poller/plugin/labelagent/label_agent.go b/cmd/poller/plugin/labelagent/label_agent.go index c5c83d6d1..394bd0891 100644 --- a/cmd/poller/plugin/labelagent/label_agent.go +++ b/cmd/poller/plugin/labelagent/label_agent.go @@ -7,6 +7,7 @@ package labelagent import ( "fmt" "github.com/netapp/harvest/v2/cmd/poller/plugin" + "github.com/netapp/harvest/v2/pkg/dict" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/matrix" "strings" @@ -189,7 +190,7 @@ func (a *LabelAgent) excludeEquals(matrix *matrix.Matrix) error { instance.SetExportable(false) a.Logger.Trace().Str("label", r.label). Str("value", r.value). - Str("instance labels", instance.GetLabels().String()). + Str("instance labels", dict.String(instance.GetLabels())). Msg("excludeEquals: excluded") break } @@ -206,7 +207,7 @@ func (a *LabelAgent) excludeContains(matrix *matrix.Matrix) error { instance.SetExportable(false) a.Logger.Trace().Str("label", r.label). Str("value", r.value). - Str("instance labels", instance.GetLabels().String()). + Str("instance labels", dict.String(instance.GetLabels())). Msg("excludeContains: excluded") break } @@ -223,7 +224,7 @@ func (a *LabelAgent) excludeRegex(matrix *matrix.Matrix) error { instance.SetExportable(false) a.Logger.Trace().Str("label", r.label). Str("regex", r.reg.String()). - Str("instance labels", instance.GetLabels().String()). + Str("instance labels", dict.String(instance.GetLabels())). Msg("excludeRegex: excluded") break } @@ -242,7 +243,7 @@ func (a *LabelAgent) includeEquals(matrix *matrix.Matrix) error { isExport = true a.Logger.Trace().Str("label", r.label). Str("value", r.value). - Str("instance labels", instance.GetLabels().String()). + Str("instance labels", dict.String(instance.GetLabels())). Msg("includeEquals: included") break } @@ -263,7 +264,7 @@ func (a *LabelAgent) includeContains(matrix *matrix.Matrix) error { isExport = true a.Logger.Trace().Str("label", r.label). Str("value", r.value). - Str("instance labels", instance.GetLabels().String()). + Str("instance labels", dict.String(instance.GetLabels())). Msg("includeContains: included") break } @@ -286,7 +287,7 @@ func (a *LabelAgent) includeRegex(matrix *matrix.Matrix) error { isExport = true a.Logger.Trace().Str("label", r.label). Str("regex", r.reg.String()). - Str("instance labels", instance.GetLabels().String()). + Str("instance labels", dict.String(instance.GetLabels())). 
Msg("includeRegex: included") break } diff --git a/cmd/poller/plugin/labelagent/label_agent_test.go b/cmd/poller/plugin/labelagent/label_agent_test.go index 27ec7998a..d4a0afe2b 100644 --- a/cmd/poller/plugin/labelagent/label_agent_test.go +++ b/cmd/poller/plugin/labelagent/label_agent_test.go @@ -6,6 +6,7 @@ package labelagent import ( "github.com/netapp/harvest/v2/cmd/poller/plugin" + "github.com/netapp/harvest/v2/pkg/dict" "github.com/netapp/harvest/v2/pkg/matrix" "github.com/netapp/harvest/v2/pkg/tree/node" "testing" @@ -79,9 +80,9 @@ func TestSplitSimpleRule(t *testing.T) { instance, _ := m.NewInstance("0") instance.SetLabel("X", "a/b/c/d") - t.Logf("before = [%s]\n", instance.GetLabels().String()) + t.Logf("before = [%s]\n", dict.String(instance.GetLabels())) _ = p.splitSimple(m) - t.Logf("after = [%s]\n", instance.GetLabels().String()) + t.Logf("after = [%s]\n", dict.String(instance.GetLabels())) if instance.GetLabel("C") != "c" || instance.GetLabel("D") != "d" { t.Error("Labels C and D don't have expected values") @@ -95,9 +96,9 @@ func TestSplitRegexRule(t *testing.T) { instance, _ := m.NewInstance("0") instance.SetLabel("X", "xxxA22_B333") - t.Logf("before = [%s]\n", instance.GetLabels().String()) + t.Logf("before = [%s]\n", dict.String(instance.GetLabels())) _ = p.splitRegex(m) - t.Logf("after = [%s]\n", instance.GetLabels().String()) + t.Logf("after = [%s]\n", dict.String(instance.GetLabels())) if instance.GetLabel("A") != "A22" || instance.GetLabel("B") != "B333" { t.Error("Labels A and B don't have expected values") @@ -111,9 +112,9 @@ func TestSplitPairsRule(t *testing.T) { instance, _ := m.NewInstance("0") instance.SetLabel("X", "owner:jack contact:some@email") - t.Logf("before = [%s]\n", instance.GetLabels().String()) + t.Logf("before = [%s]\n", dict.String(instance.GetLabels())) _ = p.splitPairs(m) - t.Logf("after = [%s]\n", instance.GetLabels().String()) + t.Logf("after = [%s]\n", dict.String(instance.GetLabels())) if instance.GetLabel("owner") != "jack" || instance.GetLabel("contact") != "some@email" { t.Error("Labels owner and contact don't have expected values") @@ -128,9 +129,9 @@ func TestJoinSimpleRule(t *testing.T) { instance.SetLabel("A", "aaa") instance.SetLabel("B", "bbb") - t.Logf("before = [%s]\n", instance.GetLabels().String()) + t.Logf("before = [%s]\n", dict.String(instance.GetLabels())) _ = p.joinSimple(m) - t.Logf("after = [%s]\n", instance.GetLabels().String()) + t.Logf("after = [%s]\n", dict.String(instance.GetLabels())) if instance.GetLabel("X") != "aaa_bbb" { t.Error("Label A does have expected value") @@ -144,9 +145,9 @@ func TestReplaceSimpleRule(t *testing.T) { instance, _ := m.NewInstance("0") instance.SetLabel("A", "aaa_X") - t.Logf("before = [%s]\n", instance.GetLabels().String()) + t.Logf("before = [%s]\n", dict.String(instance.GetLabels())) _ = p.replaceSimple(m) - t.Logf("after = [%s]\n", instance.GetLabels().String()) + t.Logf("after = [%s]\n", dict.String(instance.GetLabels())) if instance.GetLabel("A") != "X" || instance.GetLabel("B") != "bbb_X" { t.Error("Labels A and B don't have expected values") @@ -160,9 +161,9 @@ func TestReplaceRegexRule(t *testing.T) { instance, _ := m.NewInstance("0") instance.SetLabel("A", "aaa_12345_abcDEF") - t.Logf("before = [%s]\n", instance.GetLabels().String()) + t.Logf("before = [%s]\n", dict.String(instance.GetLabels())) _ = p.replaceRegex(m) - t.Logf("after = [%s]\n", instance.GetLabels().String()) + t.Logf("after = [%s]\n", dict.String(instance.GetLabels())) if instance.GetLabel("B") != 
"abcDEF-12345-bbb" { t.Error("Label B does not have expected value") diff --git a/cmd/poller/plugin/max/max.go b/cmd/poller/plugin/max/max.go index 80d00adfe..37bc27c99 100644 --- a/cmd/poller/plugin/max/max.go +++ b/cmd/poller/plugin/max/max.go @@ -6,8 +6,10 @@ package max import ( "github.com/netapp/harvest/v2/cmd/poller/plugin" + "github.com/netapp/harvest/v2/pkg/dict" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/matrix" + "github.com/rs/zerolog" "regexp" "strconv" "strings" @@ -125,7 +127,7 @@ func (m *Max) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) { key := strconv.Itoa(i) + k - //Create matrix for each metric as each metric may have an instance with different label + // Create matrix for each metric as each metric may have an instance with different label matrices[key] = data.Clone(matrix.With{Data: false, Metrics: true, Instances: false, ExportInstances: true}) matrices[key].RemoveExceptMetric(k) @@ -134,7 +136,7 @@ func (m *Max) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) { } else { matrices[key].Object = strings.ToLower(rule.label) + "_" + data.Object } - //UUID needs to be unique + // UUID needs to be unique matrices[key].UUID += key matrices[key].SetExportOptions(matrix.DefaultExportOptions()) matrices[key].SetExportable(true) @@ -159,7 +161,9 @@ func (m *Max) Run(dataMap map[string]*matrix.Matrix) ([]*matrix.Matrix, error) { continue } - m.Logger.Trace().Msgf("handling instance with labels [%s]", instance.GetLabels().String()) + if m.Logger.GetLevel() == zerolog.TraceLevel { + m.Logger.Trace().Msgf("handling instance with labels [%s]", dict.String(instance.GetLabels())) + } for i, rule := range m.rules { diff --git a/cmd/poller/poller.go b/cmd/poller/poller.go index 4f223221e..0726735ff 100644 --- a/cmd/poller/poller.go +++ b/cmd/poller/poller.go @@ -74,13 +74,14 @@ import ( // default params var ( - pollerSchedule = "60s" + pollerSchedule = "1m" logFileName = "" logMaxMegaBytes = logging.DefaultLogMaxMegaBytes logMaxBackups = logging.DefaultLogMaxBackups logMaxAge = logging.DefaultLogMaxAge asupSchedule = "24h" // send every 24 hours asupFirstWrite = "4m" // after this time, write 1st autosupport payload (for testing) + opts *options.Options ) const ( @@ -128,17 +129,18 @@ type Poller struct { // starts collectors and exporters func (p *Poller) Init() error { - var err error + var ( + err error + fileLoggingEnabled bool + consoleLoggingEnabled bool + configPath string + ) - // read options - options.SetPathsAndHostname(&args) - p.options = &args - p.name = args.Poller + p.options = opts.SetDefaults() + p.name = opts.Poller - var fileLoggingEnabled bool - var consoleLoggingEnabled bool zeroLogLevel := logging.GetZerologLevel(p.options.LogLevel) - // if we are daemon, use file logging + // if we are a daemon, use file logging if p.options.Daemon { fileLoggingEnabled = true } else { @@ -149,21 +151,29 @@ func (p *Poller) Init() error { logFileName = "poller_" + p.name + ".log" } - err = conf.LoadHarvestConfig(p.options.Config) + configPath, err = conf.LoadHarvestConfig(p.options.Config) if err != nil { // separate logger is not yet configured as it depends on setting logMaxMegaBytes, logMaxFiles later // Using default instance of logger which logs below error to harvest.log logging.Get().SubLogger("Poller", p.name).Error(). - Str("config", p.options.Config).Err(err).Msg("Unable to read config") + Str("config", p.options.Config). + Str("configPath", configPath). + Err(err). 
+ Msg("Unable to read config") return err } p.params, err = conf.PollerNamed(p.name) if err != nil { logging.Get().SubLogger("Poller", p.name).Error(). - Str("config", p.options.Config).Err(err).Msg("Failed to find poller") + Str("config", p.options.Config). + Str("configPath", configPath). + Err(err). + Msg("Failed to find poller") return err } + p.mergeConfPath() + // log handling parameters // size of file before rotating if p.params.LogMaxBytes != 0 { @@ -187,11 +197,6 @@ func (p *Poller) Init() error { MaxAge: logMaxAge} logger = logging.Configure(logConfig) - logger.Info(). - Str("logLevel", zeroLogLevel.String()). - Str("configPath", p.options.Config). - Str("version", version.String()). - Msg("Init") // if profiling port > 0 start profiling service if p.options.Profiling > 0 { @@ -202,9 +207,12 @@ func (p *Poller) Init() error { }() } - // useful info for debugging - logger.Debug().Msgf("* %s *s", version.String()) - logger.Debug().Msgf("options= %s", p.options.String()) + logger.Info(). + Str("logLevel", zeroLogLevel.String()). + Str("configPath", configPath). + Str("version", strings.TrimSpace(version.String())). + EmbedObject(p.options). + Msg("Init") // set signal handler for graceful termination signalChannel := make(chan os.Signal, 1) @@ -232,7 +240,7 @@ func (p *Poller) Init() error { } // each poller is associated with a remote host - // if no address is specified, assume that is local host + // if no address is specified, assume localhost if p.params.Addr == "" { p.target = "localhost" } else { @@ -242,7 +250,7 @@ func (p *Poller) Init() error { // create a shared auth service that all collectors will use p.auth = auth.NewCredentials(p.params, logger) - // initialize our metadata, the metadata will host status of our + // initialize our metadata, the metadata will host the status of our // collectors and exporters, as well as ping stats to target host p.loadMetadata() p.exporterParams = conf.Config.Exporters @@ -581,13 +589,18 @@ func (p *Poller) readObjects(c conf.Collector) ([]objectCollector, error) { // object name or list of objects if c.Templates != nil { for _, t := range *c.Templates { - if subTemplate, err = collector.ImportTemplate(p.options.HomePath, t, class); err != nil { + if subTemplate, err = collector.ImportTemplate(p.options.ConfPaths, t, class); err != nil { logEvent := logger.Warn() if t == "custom.yaml" { // make this less noisy since it won't exist for most people logEvent = logger.Debug() } - logEvent.Str("err", err.Error()).Msg("Unable to load template.") + logEvent. + Str("err", err.Error()). + Strs("confPaths", p.options.ConfPaths). + Str("template", t). + Str("collector", class). + Msg("Unable to load template.") continue } if template == nil { @@ -595,7 +608,7 @@ func (p *Poller) readObjects(c conf.Collector) ([]objectCollector, error) { } else { logger.Debug().Str("template", t).Msg("Merged template.") if c.Name == "Zapi" || c.Name == "ZapiPerf" { - // do not overwrite child of objects. They will be concatenated + // Do not overwrite child of objects. 
They will be concatenated template.Merge(subTemplate, []string{"objects"}) } else { template.Merge(subTemplate, []string{""}) } @@ -1152,7 +1165,7 @@ func (p *Poller) doZAPIsExist() error { ) // connect to the cluster and retrieve the system version - if poller, err = conf.PollerNamed(args.Poller); err != nil { + if poller, err = conf.PollerNamed(opts.Poller); err != nil { return err } if connection, err = zapi.New(poller, p.auth); err != nil { @@ -1162,10 +1175,22 @@ return connection.Init(2) } +// set the poller's confPath using the following precedence: +// CLI, harvest.yml, default (conf) +func (p *Poller) mergeConfPath() { + path := conf.DefaultConfPath + if p.params.ConfPath != "" { + path = p.params.ConfPath + } + if p.options.ConfPath != conf.DefaultConfPath { + path = p.options.ConfPath + } + p.options.SetConfPath(path) +} + func startPoller(_ *cobra.Command, _ []string) { - // cmd.DebugFlags() // uncomment to print flags poller := &Poller{} - poller.options = &args + poller.options = opts if poller.Init() != nil { // error already logged by poller poller.Stop() @@ -1175,29 +1200,27 @@ os.Exit(0) } -var args = options.Options{ - Version: version.VERSION, -} - func init() { - configPath := conf.Path(conf.HarvestYML) + opts = options.New() + opts.Version = version.VERSION var flags = pollerCmd.Flags() - flags.StringVarP(&args.Poller, "poller", "p", "", "Poller name as defined in config") - flags.BoolVarP(&args.Debug, "debug", "d", false, "Debug mode, no data will be exported") - flags.BoolVar(&args.Daemon, "daemon", false, "Start as daemon") - flags.IntVarP(&args.LogLevel, "loglevel", "l", 2, "Logging level (0=trace, 1=debug, 2=info, 3=warning, 4=error, 5=critical)") - flags.BoolVar(&args.LogToFile, "logtofile", false, "When running in the foreground, log to file instead of stdout") - flags.IntVar(&args.Profiling, "profiling", 0, "If profiling port > 0, enables profiling via localhost:PORT/debug/pprof/") - flags.IntVar(&args.PromPort, "promPort", 0, "Prometheus Port") - flags.StringVar(&args.Config, "config", configPath, "Harvest config file path") - flags.StringSliceVarP(&args.Collectors, "collectors", "c", []string{}, "Only start these collectors (overrides harvest.yml)") - flags.StringSliceVarP(&args.Objects, "objects", "o", []string{}, "Only start these objects (overrides collector config)") + flags.StringVarP(&opts.Poller, "poller", "p", "", "Poller name as defined in config") + flags.BoolVarP(&opts.Debug, "debug", "d", false, "Debug mode, no data will be exported") + flags.BoolVar(&opts.Daemon, "daemon", false, "Start as daemon") + flags.IntVarP(&opts.LogLevel, "loglevel", "l", 2, "Logging level (0=trace, 1=debug, 2=info, 3=warning, 4=error, 5=critical)") + flags.BoolVar(&opts.LogToFile, "logtofile", false, "When running in the foreground, log to file instead of stdout") + flags.IntVar(&opts.Profiling, "profiling", 0, "If profiling port > 0, enables profiling via localhost:PORT/debug/pprof/") + flags.IntVar(&opts.PromPort, "promPort", 0, "Prometheus Port") + flags.StringVar(&opts.Config, "config", conf.HarvestYML, "Harvest config file path") + flags.StringSliceVarP(&opts.Collectors, "collectors", "c", []string{}, "Only start these collectors (overrides harvest.yml)") + flags.StringSliceVarP(&opts.Objects, "objects", "o", []string{}, "Only start these objects (overrides collector config)") + flags.StringVar(&opts.ConfPath, "confpath", conf.DefaultConfPath, "colon-separated paths to search for
Harvest templates") // Used to test autosupport at startup. An environment variable is used instead of a cmdline // arg, so we don't have to also add this testing arg to harvest cli if isAsup := os.Getenv("ASUP"); isAsup != "" { - args.Asup = true + opts.Asup = true } _ = pollerCmd.MarkFlagRequired("poller") diff --git a/cmd/tools/doctor/doctor.go b/cmd/tools/doctor/doctor.go index a1842d958..33c753ddd 100644 --- a/cmd/tools/doctor/doctor.go +++ b/cmd/tools/doctor/doctor.go @@ -90,42 +90,45 @@ func doMerge(path1 string, path2 string) { func doDoctorCmd(cmd *cobra.Command, _ []string) { var config = cmd.Root().PersistentFlags().Lookup("config") - doDoctor(conf.ConfigPath(config.Value.String())) + var confPaths = cmd.Root().PersistentFlags().Lookup("confpath") + + doDoctor(conf.ConfigPath(config.Value.String()), confPaths.Value.String()) } -func doDoctor(path string) { - contents, err := os.ReadFile(path) - if err != nil { - fmt.Printf("error reading config file. err=%+v\n", err) - return - } +func doDoctor(path string, confPath string) { if opts.ShouldPrintConfig { + contents, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading config file. err=%+v\n", err) + return + } printRedactedConfig(path, contents) } - checkAll(path, contents) + checkAll(path, confPath) } // checkAll runs all doctor checks // If all checks succeed, print nothing and exit with a return code of 0 // Otherwise, print what failed and exit with a return code of 1 -func checkAll(path string, contents []byte) { +func checkAll(path string, confPath string) { // See https://github.com/NetApp/harvest/issues/16 for more checks to add color.DetectConsole(opts.Color) - // Validate that the config file can be parsed - harvestConfig := &conf.HarvestConfig{} - err := yaml.Unmarshal(contents, harvestConfig) + + _, err := conf.LoadHarvestConfig(path) if err != nil { fmt.Printf("error reading config file=[%s] %+v\n", path, err) os.Exit(1) return } + cfg := conf.Config + confPaths := filepath.SplitList(confPath) anyFailed := false - anyFailed = !checkUniquePromPorts(*harvestConfig).isValid || anyFailed - anyFailed = !checkPollersExportToUniquePromPorts(*harvestConfig).isValid || anyFailed - anyFailed = !checkExporterTypes(*harvestConfig).isValid || anyFailed - anyFailed = !checkCustomYaml("").isValid || anyFailed - anyFailed = !checkCollectorName(*harvestConfig).isValid || anyFailed + anyFailed = !checkUniquePromPorts(cfg).isValid || anyFailed + anyFailed = !checkPollersExportToUniquePromPorts(cfg).isValid || anyFailed + anyFailed = !checkExporterTypes(cfg).isValid || anyFailed + anyFailed = !checkConfTemplates(confPaths).isValid || anyFailed + anyFailed = !checkCollectorName(cfg).isValid || anyFailed if anyFailed { os.Exit(1) @@ -179,62 +182,62 @@ func checkCollectorName(config conf.HarvestConfig) validation { return valid } -func checkCustomYaml(confParent string) validation { +func checkConfTemplates(confPaths []string) validation { valid := validation{isValid: true} - confDir := conf.Path("conf") - if confParent != "" { - confDir = path.Join(confParent, "conf") - } - dir, err := os.ReadDir(confDir) - if err != nil { - fmt.Printf("unable to read directory=%s err=%s\n", confDir, err) - } - for _, f := range dir { - if !f.IsDir() { - continue - } - flavor := f.Name() - custom := path.Join(confDir, flavor, "custom.yaml") - if _, err := os.Stat(custom); errors.Is(err, os.ErrNotExist) { - continue - } - template, err := collector.ImportTemplate(confParent, "custom.yaml", flavor) + for _, confDir := range confPaths { + 
dir, err := os.ReadDir(confDir) if err != nil { - valid.isValid = false - valid.invalid = append(valid.invalid, fmt.Sprintf(`%s is empty or invalid err=%+v`, custom, err)) - continue - } - s := template.GetChildS("objects") - if s == nil { - valid.isValid = false - msg := fmt.Sprintf(`%s should have a top-level "objects" key`, custom) - valid.invalid = append(valid.invalid, msg) + fmt.Printf("unable to read directory=%s err=%s\n", confDir, err) continue } - if s.Children == nil { - valid.isValid = false - msg := fmt.Sprintf("%s objects section should be a map of object: path", custom) - valid.invalid = append(valid.invalid, msg) - } else { - for _, t := range s.Children { - if len(t.Content) == 0 { - valid.isValid = false - msg := fmt.Sprintf("%s objects section should be a map of object: path", custom) - valid.invalid = append(valid.invalid, msg) - continue - } - searchDir := path.Join(confDir, flavor) - if !templateExists(searchDir, t.GetContentS()) { - valid.isValid = false - msg := fmt.Sprintf(`%s references template file "%s" which does not exist in %s`, - custom, t.GetContentS(), path.Join(searchDir, "**")) - valid.invalid = append(valid.invalid, msg) - continue + for _, f := range dir { + if !f.IsDir() { + continue + } + flavor := f.Name() + custom := path.Join(confDir, flavor, "custom.yaml") + if _, err := os.Stat(custom); errors.Is(err, os.ErrNotExist) { + continue + } + template, err := collector.ImportTemplate(confPaths, "custom.yaml", flavor) + if err != nil { + valid.isValid = false + valid.invalid = append(valid.invalid, fmt.Sprintf(`%s is empty or invalid err=%+v`, custom, err)) + continue + } + s := template.GetChildS("objects") + if s == nil { + valid.isValid = false + msg := fmt.Sprintf(`%s should have a top-level "objects" key`, custom) + valid.invalid = append(valid.invalid, msg) + continue + } + if s.Children == nil { + valid.isValid = false + msg := fmt.Sprintf("%s objects section should be a map of object: path", custom) + valid.invalid = append(valid.invalid, msg) + } else { + for _, t := range s.Children { + if len(t.Content) == 0 { + valid.isValid = false + msg := fmt.Sprintf("%s objects section should be a map of object: path", custom) + valid.invalid = append(valid.invalid, msg) + continue + } + searchDir := path.Join(confDir, flavor) + if !templateExists(searchDir, t.GetContentS()) { + valid.isValid = false + msg := fmt.Sprintf(`%s references template file "%s" which does not exist in %s`, + custom, t.GetContentS(), path.Join(searchDir, "**")) + valid.invalid = append(valid.invalid, msg) + continue + } } } } } + if len(valid.invalid) > 0 { fmt.Printf("%s: Problems found in custom.yaml files\n", color.Colorize("Error", color.Red)) for _, s := range valid.invalid { diff --git a/cmd/tools/doctor/doctor_test.go b/cmd/tools/doctor/doctor_test.go index c45a18e3f..f068f8914 100644 --- a/cmd/tools/doctor/doctor_test.go +++ b/cmd/tools/doctor/doctor_test.go @@ -104,24 +104,24 @@ func TestCustomYamlIsValid(t *testing.T) { tests := []test{ { - path: "testdata/conf1", + path: "testdata/conf1/conf", numInvalid: 1, msgContains: "top-level", }, { - path: "testdata/conf2", + path: "testdata/conf2/conf", numInvalid: 1, msgContains: "should be a map", }, { - path: "testdata/conf3", + path: "testdata/conf3/conf", numInvalid: 1, msgContains: "does not exist", }, } for _, tt := range tests { t.Run(tt.path, func(t *testing.T) { - valid := checkCustomYaml(tt.path) + valid := checkConfTemplates([]string{tt.path}) if valid.isValid { t.Errorf("want isValid=%t, got %t", false, 
valid.isValid) } diff --git a/cmd/tools/generate/generate.go b/cmd/tools/generate/generate.go index 20f3a57fc..ad4c00448 100644 --- a/cmd/tools/generate/generate.go +++ b/cmd/tools/generate/generate.go @@ -29,7 +29,6 @@ type PollerInfo struct { ContainerName string ShowPorts bool IsFull bool - TemplateDir string CertDir string Mounts []string } @@ -61,11 +60,12 @@ type options struct { filesdPath string showPorts bool outputPath string - templateDir string certDir string promPort int grafanaPort int mounts []string + configPath string + confPath string } var opts = &options{ @@ -105,22 +105,28 @@ var metricCmd = &cobra.Command{ } func doDockerFull(cmd *cobra.Command, _ []string) { - var config = cmd.Root().PersistentFlags().Lookup("config") - generateFullCompose(conf.ConfigPath(config.Value.String())) + addRootOptions(cmd) + generateDocker(full) } + func doSystemd(cmd *cobra.Command, _ []string) { - var config = cmd.Root().PersistentFlags().Lookup("config") - generateSystemd(conf.ConfigPath(config.Value.String())) + addRootOptions(cmd) + generateSystemd() } func doDockerCompose(cmd *cobra.Command, _ []string) { - var config = cmd.Root().PersistentFlags().Lookup("config") - generateDockerCompose(conf.ConfigPath(config.Value.String())) + addRootOptions(cmd) + generateDocker(harvest) } func doGenerateMetrics(cmd *cobra.Command, _ []string) { - var config = cmd.Root().PersistentFlags().Lookup("config") - generateMetrics(conf.ConfigPath(config.Value.String())) + addRootOptions(cmd) + generateMetrics() +} + +func addRootOptions(cmd *cobra.Command) { + opts.configPath = conf.ConfigPath(cmd.Root().PersistentFlags().Lookup("config").Value.String()) + opts.confPath = cmd.Root().PersistentFlags().Lookup("confpath").Value.String() } const ( @@ -129,36 +135,32 @@ const ( harvestAdminService = "harvest-admin.service" ) -func generateFullCompose(path string) { - generateDocker(path, full) -} - -func generateDockerCompose(path string) { - generateDocker(path, harvest) -} - func normalizeContainerNames(name string) string { re := regexp.MustCompile("[._]") return strings.ToLower(re.ReplaceAllString(name, "-")) } -func generateDocker(path string, kind int) { - pollerTemplate := PollerTemplate{} +func generateDocker(kind int) { + var ( + pollerTemplate PollerTemplate + configFilePath string + certDirPath string + filesd []string + out *os.File + ) + + pollerTemplate = PollerTemplate{} promTemplate := PromTemplate{ opts.grafanaPort, opts.promPort, } - err := conf.LoadHarvestConfig(path) + _, err := conf.LoadHarvestConfig(opts.configPath) if err != nil { - return + logErrAndExit(err) } - configFilePath := path - - templateDirPath := opts.templateDir - - certDirPath := opts.certDir + configFilePath = asComposePath(opts.configPath) + certDirPath = asComposePath(opts.certDir) - var filesd []string for _, v := range conf.Config.PollersOrdered { port, _ := conf.GetPrometheusExporterPorts(v, true) pollerInfo := PollerInfo{ @@ -169,11 +171,10 @@ func generateDocker(path string, kind int) { LogLevel: opts.loglevel, Image: opts.image, ContainerName: normalizeContainerNames("poller_" + v), - ShowPorts: kind == harvest || opts.showPorts, + ShowPorts: opts.showPorts, IsFull: kind == full, - TemplateDir: templateDirPath, CertDir: certDirPath, - Mounts: opts.mounts, + Mounts: makeMounts(v), } pollerTemplate.Pollers = append(pollerTemplate.Pollers, pollerInfo) filesd = append(filesd, fmt.Sprintf("- targets: ['%s:%d']", pollerInfo.ServiceName, pollerInfo.Port)) @@ -184,7 +185,6 @@ func generateDocker(path string, kind int) { 
logErrAndExit(err) } - var out *os.File color.DetectConsole("") out, err = os.Create(opts.outputPath) if err != nil { @@ -247,6 +247,16 @@ func generateDocker(path string, kind int) { } _, _ = fmt.Fprintf(os.Stderr, "Wrote file_sd targets to %s\n", opts.filesdPath) + if os.Getenv("HARVEST_DOCKER") != "" { + srcFolder := "/opt/harvest" + destFolder := "/opt/temp" + + err = copyFiles(srcFolder, destFolder) + if err != nil { + logErrAndExit(err) + } + } + if kind == harvest { _, _ = fmt.Fprintf(os.Stderr, "Start containers with:\n"+ @@ -259,6 +269,105 @@ func generateDocker(path string, kind int) { } } +// setup mount(s) for the confpath and any CLI-passed mounts +func makeMounts(pollerName string) []string { + var mounts = opts.mounts + + p, err := conf.PollerNamed(pollerName) + if err != nil { + logErrAndExit(err) + } + + confPath := opts.confPath + if confPath == "conf" { + confPath = p.ConfPath + } + + if confPath == "" { + mounts = append(mounts, toMount("./conf")) + } else { + paths := strings.Split(confPath, ":") + for _, path := range paths { + mounts = append(mounts, toMount(path)) + } + } + + return mounts +} + +func toMount(hostPath string) string { + hostPath = asComposePath(hostPath) + if strings.HasPrefix(hostPath, "./") { + return hostPath + ":" + "/opt/harvest/" + hostPath[2:] + } + return hostPath + ":" + hostPath +} + +func copyFiles(srcPath, destPath string) error { + filesToExclude := map[string]bool{ + "harvest.yml": true, + "harvest.yml.example": true, + "prom-stack.tmpl": true, + } + dirsToExclude := map[string]bool{ + "bin": true, + "autosupport": true, + } + return filepath.Walk(srcPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Generate the destination path + relPath, err := filepath.Rel(srcPath, path) + if err != nil { + return err + } + dest := filepath.Join(destPath, relPath) + + if info.IsDir() { + // Skip excluded directories + if dirsToExclude[info.Name()] { + return filepath.SkipDir + } + // Create the directory + return os.MkdirAll(dest, 0750) + } + + // Skip excluded files + if filesToExclude[info.Name()] { + return nil + } + + // Copy the file + return copyFile(path, dest) + }) +} + +func copyFile(srcPath, destPath string) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer silentClose(srcFile) + + destFile, err := os.Create(destPath) + if err != nil { + return err + } + defer silentClose(destFile) + + _, err = io.Copy(destFile, srcFile) + return err +} + +func asComposePath(path string) string { + if strings.HasPrefix(path, "/") || strings.HasPrefix(path, "./") { + return path + } + return "./" + path +} + func logErrAndExit(err error) { fmt.Printf("%v\n", err) os.Exit(1) @@ -268,11 +377,11 @@ func silentClose(body io.ReadCloser) { _ = body.Close() } -func generateSystemd(path string) { +func generateSystemd() { var adminService string - err := conf.LoadHarvestConfig(path) + _, err := conf.LoadHarvestConfig(opts.configPath) if err != nil { - return + logErrAndExit(err) } if conf.Config.Pollers == nil { return @@ -289,7 +398,7 @@ func generateSystemd(path string) { println("and " + color.Colorize("cp "+harvestAdminService+" /etc/systemd/system/", color.Green)) } println("and then run " + color.Colorize("systemctl daemon-reload", color.Green)) - writeAdminSystemd(path) + writeAdminSystemd(opts.configPath) // reorder list of pollers so that unix collectors are last, see https://github.com/NetApp/harvest/issues/643 pollers := make([]string, 0) unixPollers := make([]string, 
0) @@ -344,7 +453,7 @@ func writeAdminSystemd(configFp string) { println(color.Colorize("✓", color.Green) + " HTTP SD file: " + harvestAdminService + " created") } -func generateMetrics(path string) { +func generateMetrics() { var ( poller *conf.Poller err error @@ -352,13 +461,13 @@ func generateMetrics(path string) { zapiClient *zapi.Client ) - err = conf.LoadHarvestConfig(path) + _, err = conf.LoadHarvestConfig(opts.configPath) if err != nil { - return + logErrAndExit(err) } if poller, _, err = rest.GetPollerAndAddr(opts.Poller); err != nil { - return + logErrAndExit(err) } timeout, _ := time.ParseDuration(rest.DefaultTimeout) @@ -402,10 +511,9 @@ func init() { "logging level (0=trace, 1=debug, 2=info, 3=warning, 4=error, 5=critical)", ) dFlags.StringVar(&opts.image, "image", "ghcr.io/netapp/harvest:latest", "Harvest image. Use rahulguptajss/harvest:latest to pull from Docker Hub") - dFlags.StringVar(&opts.templateDir, "templatedir", "./conf", "Harvest template dir path") dFlags.StringVar(&opts.certDir, "certdir", "./cert", "Harvest certificate dir path") dFlags.StringVarP(&opts.outputPath, "output", "o", "", "Output file path. ") - dFlags.BoolVarP(&opts.showPorts, "port", "p", false, "Expose poller ports to host machine") + dFlags.BoolVarP(&opts.showPorts, "port", "p", true, "Expose poller ports to host machine") _ = dockerCmd.MarkPersistentFlagRequired("output") dFlags.StringSliceVar(&opts.mounts, "volume", []string{}, "Additional volume mounts to include in compose file") diff --git a/cmd/tools/generate/generate_test.go b/cmd/tools/generate/generate_test.go new file mode 100644 index 000000000..bde7879b1 --- /dev/null +++ b/cmd/tools/generate/generate_test.go @@ -0,0 +1,22 @@ +package generate + +import "testing" + +func Test_toMount(t *testing.T) { + tests := []struct { + name string + hostPath string + want string + }{ + {name: "dot prefix", hostPath: "./abc/d", want: "./abc/d:/opt/harvest/abc/d"}, + {name: "absolute", hostPath: "/x/y/z", want: "/x/y/z:/x/y/z"}, + {name: "cwd", hostPath: "abc/d", want: "./abc/d:/opt/harvest/abc/d"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := toMount(tt.hostPath); got != tt.want { + t.Errorf("toMount() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/cmd/tools/grafana/dashboard_test.go b/cmd/tools/grafana/dashboard_test.go index d7d151b39..4563702c7 100644 --- a/cmd/tools/grafana/dashboard_test.go +++ b/cmd/tools/grafana/dashboard_test.go @@ -27,7 +27,7 @@ var aggregationPattern = regexp.MustCompile(`\b(sum|count|min|max)\b`) func checkThreshold(t *testing.T, path string, data []byte) { path = shortPath(path) var thresholdMap = map[string][]string{ - // _latency are in microseconds + // _latencies are in microseconds "_latency": { "[\"green\",\"orange\",\"red\"]", "[null,20000,30000]", @@ -37,7 +37,7 @@ func checkThreshold(t *testing.T, path string, data []byte) { "[null,60,80]", }, } - // visit all panel for datasource test + // visit all panels for datasource test visitAllPanels(data, func(p string, key, value gjson.Result) { panelTitle := value.Get("title").String() kind := value.Get("type").String() @@ -62,7 +62,7 @@ func checkThreshold(t *testing.T, path string, data []byte) { "table": {"color-background", "lcd-gauge"}, "stat": {"background"}, } - // check in default also for stat. For table we only want relevant column background and override settings + // check in default also for stat. 
For table, we only want the relevant column background and override settings if kind == "stat" { dS := value.Get("fieldConfig.defaults") tSlice := dS.Get("thresholds") @@ -1080,19 +1080,19 @@ func checkPercentHasMinMax(t *testing.T, path string, data []byte) { if defaultUnit != "percent" && defaultUnit != "percentunit" { return } - min := value.Get("fieldConfig.defaults.min").String() - max := value.Get("fieldConfig.defaults.max").String() - if min != "0" { + theMin := value.Get("fieldConfig.defaults.min").String() + theMax := value.Get("fieldConfig.defaults.max").String() + if theMin != "0" { t.Errorf(`dashboard=%s path=%s panel="%s" has unit=%s, min should be 0 got=%s`, - dashPath, path, value.Get("title").String(), defaultUnit, min) + dashPath, path, value.Get("title").String(), defaultUnit, theMin) } - if defaultUnit == "percent" && max != "100" { + if defaultUnit == "percent" && theMax != "100" { t.Errorf(`dashboard=%s path=%s panel="%s" has unit=%s, max should be 100 got=%s`, - dashPath, path, value.Get("title").String(), defaultUnit, max) + dashPath, path, value.Get("title").String(), defaultUnit, theMax) } - if defaultUnit == "percentunit" && max != "1" { + if defaultUnit == "percentunit" && theMax != "1" { t.Errorf(`dashboard=%s path=%s panel="%s" has unit=%s, max should be 1 got=%s`, - dashPath, path, value.Get("title").String(), defaultUnit, max) + dashPath, path, value.Get("title").String(), defaultUnit, theMax) } }) } @@ -1259,3 +1259,23 @@ func checkDashboardTime(t *testing.T, path string, data []byte) { t.Errorf("dashboard=%s time.to got=%s want=%s", dashPath, to.String(), toWant) } } + +func TestNoDrillDownRows(t *testing.T) { + visitDashboards(dashboards, func(path string, data []byte) { + checkRowNames(t, path, data) + }) +} + +func checkRowNames(t *testing.T, path string, data []byte) { + path = shortPath(path) + visitAllPanels(data, func(p string, key, value gjson.Result) { + kind := value.Get("type").String() + if kind == "row" { + title := value.Get("title").String() + if strings.Contains(title, "Drilldown") { + t.Errorf(`dashboard=%s path=panels[%d] title=[%s] got row with Drilldown in title. 
Remove drilldown`, path, key.Int(), title) + } + } + }) + +} diff --git a/cmd/tools/grafana/grafana.go b/cmd/tools/grafana/grafana.go index 2440b009a..c4aabf309 100644 --- a/cmd/tools/grafana/grafana.go +++ b/cmd/tools/grafana/grafana.go @@ -185,25 +185,27 @@ func exportFiles(dir string, folder *Folder) error { } func addSvmRegex(content []byte, fileName string, val string) []byte { - var err error - newContent := content - var svmExpression []string + svmExpression := []string{"templating.list.#(name=\"SVM\")"} if fileName == "snapmirror.json" { svmExpression = []string{"templating.list.#(name=\"DestinationSVM\")", "templating.list.#(name=\"SourceSVM\")"} - } else { - svmExpression = []string{"templating.list.#(name=\"SVM\")"} } for _, s := range svmExpression { + var err error svm := gjson.GetBytes(content, s) if svm.Exists() { - newContent, err = sjson.SetBytes(newContent, s+".regex", []byte(val)) + content, err = sjson.SetBytes(content, s+".regex", []byte(val)) + if err != nil { + fmt.Printf("Error while setting svm regex: %v\n", err) + continue + } + content, err = sjson.SetBytes(content, s+".allValue", json.RawMessage("null")) if err != nil { - fmt.Printf("error while setting svm regex") + fmt.Printf("Error while setting svm allValue: %v\n", err) continue } } } - return newContent + return content } func addLabel(content []byte, label string, labelMap map[string]string) []byte { @@ -349,7 +351,7 @@ func newLabelVar(label string) []byte { func doImport(_ *cobra.Command, _ []string) { opts.command = "import" - err := conf.LoadHarvestConfig(opts.config) + _, err := conf.LoadHarvestConfig(opts.config) if err != nil { return } @@ -729,7 +731,7 @@ func checkToken(opts *options, ignoreConfig bool, tries int) error { configPath = opts.config - err = conf.LoadHarvestConfig(configPath) + _, err = conf.LoadHarvestConfig(configPath) if err != nil { return err } diff --git a/cmd/tools/rest/client.go b/cmd/tools/rest/client.go index 5b8c6ac88..0aaff5ce7 100644 --- a/cmd/tools/rest/client.go +++ b/cmd/tools/rest/client.go @@ -158,9 +158,9 @@ func (c *Client) invokeWithAuthRetry() ([]byte, error) { doInvoke := func() ([]byte, error) { var ( - response *http.Response - body []byte - err error + response *http.Response + innerBody []byte + innerErr error ) if c.request.Body != nil { @@ -174,23 +174,23 @@ func (c *Client) invokeWithAuthRetry() ([]byte, error) { restReq := c.request.URL.String() // send request to server - if response, err = c.client.Do(c.request); err != nil { - return nil, fmt.Errorf("connection error %w", err) + if response, innerErr = c.client.Do(c.request); innerErr != nil { + return nil, fmt.Errorf("connection error %w", innerErr) } //goland:noinspection GoUnhandledErrorResult defer response.Body.Close() + innerBody, innerErr = io.ReadAll(response.Body) + if innerErr != nil { + return nil, errs.Rest(response.StatusCode, innerErr.Error(), 0, "") + } if response.StatusCode != http.StatusOK { - body2, err2 := io.ReadAll(response.Body) - if err2 != nil { - return nil, errs.Rest(response.StatusCode, err2.Error(), 0, "") - } if response.StatusCode == http.StatusUnauthorized { return nil, errs.New(errs.ErrAuthFailed, response.Status) } - result := gjson.GetBytes(body2, "error") + result := gjson.GetBytes(innerBody, "error") if response.StatusCode == http.StatusForbidden { message := result.Get(Message).String() @@ -206,16 +206,9 @@ func (c *Client) invokeWithAuthRetry() ([]byte, error) { return nil, errs.Rest(response.StatusCode, "", 0, "") } - // read response body - if body, err = 
io.ReadAll(response.Body); err != nil { - return nil, err - } - defer c.printRequestAndResponse(restReq, body) + defer c.printRequestAndResponse(restReq, innerBody) - if err != nil { - return nil, err - } - return body, nil + return innerBody, nil } body, err = doInvoke() @@ -308,13 +301,13 @@ func (c *Client) Init(retries int) error { continue } - results := gjson.GetManyBytes(content, "name", "uuid", "version.full", "version.generation", "version.major", "version.minor") - c.cluster.Name = results[0].String() - c.cluster.UUID = results[1].String() - c.cluster.Info = results[2].String() - c.cluster.Version[0] = int(results[3].Int()) - c.cluster.Version[1] = int(results[4].Int()) - c.cluster.Version[2] = int(results[5].Int()) + results := gjson.ParseBytes(content) + c.cluster.Name = results.Get("name").String() + c.cluster.UUID = results.Get("uuid").String() + c.cluster.Info = results.Get("version.full").String() + c.cluster.Version[0] = int(results.Get("version.generation").Int()) + c.cluster.Version[1] = int(results.Get("version.major").Int()) + c.cluster.Version[2] = int(results.Get("version.minor").Int()) return nil } return err diff --git a/cmd/tools/rest/rest.go b/cmd/tools/rest/rest.go index 383735c4e..a42a402dc 100644 --- a/cmd/tools/rest/rest.go +++ b/cmd/tools/rest/rest.go @@ -112,7 +112,7 @@ func doShow(_ *cobra.Command, a []string) { if !c.isValid { return } - err := conf.LoadHarvestConfig(args.Config) + _, err := conf.LoadHarvestConfig(args.Config) if err != nil { log.Fatal(err) } @@ -450,16 +450,12 @@ func fetch(client *Client, href string, records *[]gjson.Result, downloadAll boo return fmt.Errorf("error making request %w", err) } - isNonIterRestCall := false - output := gjson.GetManyBytes(getRest, "records", "num_records", "_links.next.href") - data := output[0] - numRecords := output[1] - next := output[2] - if !data.Exists() { - isNonIterRestCall = true - } + output := gjson.ParseBytes(getRest) + data := output.Get("records") + numRecords := output.Get("num_records") + next := output.Get("_links.next.href") - if isNonIterRestCall { + if !data.Exists() { contentJSON := `{"records":[]}` response, err := sjson.SetRawBytes([]byte(contentJSON), "records.-1", getRest) if err != nil { @@ -503,11 +499,11 @@ func fetchAnalytics(client *Client, href string, records *[]gjson.Result, analyt return fmt.Errorf("error making request %w", err) } - output := gjson.GetManyBytes(getRest, "records", "num_records", "_links.next.href", "analytics") - data := output[0] - numRecords := output[1] - next := output[2] - *analytics = output[3] + output := gjson.ParseBytes(getRest) + data := output.Get("records") + numRecords := output.Get("num_records") + next := output.Get("_links.next.href") + *analytics = output.Get("analytics") // extract returned records since paginated records need to be merged into a single lists if numRecords.Exists() && numRecords.Int() > 0 { @@ -546,11 +542,10 @@ func FetchRestPerfData(client *Client, href string, perfRecords *[]PerfRecord) e } // extract returned records since paginated records need to be merged into a single list - output := gjson.GetManyBytes(getRest, "records", "num_records", "_links.next.href") - - data := output[0] - numRecords := output[1] - next := output[2] + output := gjson.ParseBytes(getRest) + data := output.Get("records") + numRecords := output.Get("num_records") + next := output.Get("_links.next.href") if numRecords.Exists() && numRecords.Int() > 0 { p := PerfRecord{Records: data, Timestamp: time.Now().UnixNano()} diff --git 
a/cmd/tools/template/template_test.go b/cmd/tools/template/template_test.go index a1ac542ac..864c6301f 100644 --- a/cmd/tools/template/template_test.go +++ b/cmd/tools/template/template_test.go @@ -517,7 +517,7 @@ func visitTemplates(t *testing.T, eachTemplate func(path string, model TemplateM } err = addPluginLabels(path, &model) if err != nil { - //t.Errorf("failed to addPluginLabels template path=%s err=%v", shortPath(path), err) + // t.Errorf("failed to addPluginLabels template path=%s err=%v", shortPath(path), err) return err } eachTemplate(path, model) @@ -611,6 +611,11 @@ func findCustomPlugins(path string, template *node.Node, model *TemplateModel) e splits := strings.Split(path, "/") pluginGo := fmt.Sprintf("../../../cmd/collectors/%s/plugins/%s/%s.go", splits[4], goPluginName, goPluginName) + // Both Zapi and REST sensor.yaml templates uses a single plugin defined in power.go + if strings.Contains(path, "sensor.yaml") { + pluginGo = "../../../cmd/collectors/power.go" + } + err2 := readPlugin(pluginGo, model) if err2 != nil { return err2 diff --git a/cmd/tools/zapi/export.go b/cmd/tools/zapi/export.go index 74af3b3d7..7a020bcb6 100644 --- a/cmd/tools/zapi/export.go +++ b/cmd/tools/zapi/export.go @@ -10,7 +10,7 @@ import ( "github.com/netapp/harvest/v2/pkg/tree/node" "github.com/netapp/harvest/v2/pkg/tree/yaml" "os" - "path" + "path/filepath" "strings" ) @@ -104,12 +104,12 @@ func exportCounters(item *node.Node, c *client.Client, args *Args) error { fp = append(fp, "9.8.0") fp = append(fp, strings.ReplaceAll(args.Object, ":", "_")+".yaml") - if err = os.MkdirAll(path.Join(fp[:5]...), 0750); err != nil { + if err = os.MkdirAll(filepath.Join(fp[:5]...), 0750); err != nil { fmt.Println("mkdirall") return err } - templateFp := path.Join(fp...) + templateFp := filepath.Join(fp...) 
if err = os.WriteFile(templateFp, dump, 0600); err != nil { fmt.Println("writefile") @@ -128,13 +128,13 @@ func exportCounters(item *node.Node, c *client.Client, args *Args) error { return nil } - if custom, err = collector.ImportTemplate(harvestHomePath, "custom.yaml", "zapiperf"); err != nil { + if custom, err = collector.ImportTemplate([]string{"conf"}, "custom.yaml", "zapiperf"); err != nil { custom = node.NewS("") custom.NewChildS("collector", "ZapiPerf") custom.NewChildS("objects", "") } - customFp := path.Join(harvestHomePath, "conf/", "zapiperf/", "custom.yaml") + customFp := filepath.Join(harvestHomePath, "conf/", "zapiperf/", "custom.yaml") if objects := custom.GetChildS("objects"); objects != nil { diff --git a/cmd/tools/zapi/zapi.go b/cmd/tools/zapi/zapi.go index 3b583519b..b52d9c8df 100644 --- a/cmd/tools/zapi/zapi.go +++ b/cmd/tools/zapi/zapi.go @@ -129,7 +129,7 @@ func doCmd(cmd string) { connection *client.Client ) - err = conf.LoadHarvestConfig(args.Config) + _, err = conf.LoadHarvestConfig(args.Config) if err != nil { log.Fatal(err) } diff --git a/conf/rest/9.10.0/aggr.yaml b/conf/rest/9.10.0/aggr.yaml index 44475b8dd..6d0cd880b 100644 --- a/conf/rest/9.10.0/aggr.yaml +++ b/conf/rest/9.10.0/aggr.yaml @@ -40,6 +40,8 @@ counters: - space.efficiency_without_snapshots.savings => efficiency_savings_wo_snapshots - space.efficiency_without_snapshots_flexclones.logical_used => logical_used_wo_snapshots_flexclones - space.efficiency_without_snapshots_flexclones.savings => efficiency_savings_wo_snapshots_flexclones + - space.footprint => space_performance_tier_used + - space.footprint_percent => space_performance_tier_used_percent - space.snapshot.available => snapshot_size_available - space.snapshot.reserve_percent => snapshot_reserve_percent - space.snapshot.total => snapshot_size_total diff --git a/conf/rest/9.10.0/volume.yaml b/conf/rest/9.10.0/volume.yaml index 64b750399..e24702db1 100644 --- a/conf/rest/9.10.0/volume.yaml +++ b/conf/rest/9.10.0/volume.yaml @@ -93,6 +93,16 @@ endpoints: - filter: - privilege_level=diagnostic + - query: api/private/cli/volume/footprint + counters: + - ^^volume + - ^^vserver => svm + - volume_blocks_footprint_bin0 => performance_tier_footprint + - volume_blocks_footprint_bin0_percent => performance_tier_footprint_percent + - volume_blocks_footprint_bin1 => capacity_tier_footprint + - volume_blocks_footprint_bin1_percent => capacity_tier_footprint_percent + + plugins: - Volume: schedule: diff --git a/conf/rest/9.12.0/aggr.yaml b/conf/rest/9.12.0/aggr.yaml index 60918227e..4e83030eb 100644 --- a/conf/rest/9.12.0/aggr.yaml +++ b/conf/rest/9.12.0/aggr.yaml @@ -50,6 +50,8 @@ counters: - space.efficiency_without_snapshots.savings => efficiency_savings_wo_snapshots - space.efficiency_without_snapshots_flexclones.logical_used => logical_used_wo_snapshots_flexclones - space.efficiency_without_snapshots_flexclones.savings => efficiency_savings_wo_snapshots_flexclones + - space.footprint => space_performance_tier_used + - space.footprint_percent => space_performance_tier_used_percent - space.snapshot.available => snapshot_size_available - space.snapshot.reserve_percent => snapshot_reserve_percent - space.snapshot.total => snapshot_size_total diff --git a/conf/rest/9.12.0/lif.yaml b/conf/rest/9.12.0/lif.yaml index a348ee2c4..e5aef78db 100644 --- a/conf/rest/9.12.0/lif.yaml +++ b/conf/rest/9.12.0/lif.yaml @@ -3,9 +3,9 @@ query: api/network/ip/interfaces object: lif counters: + - ^^ip.address => address - ^^name => lif - ^^svm.name => svm - - ^ip.address => 
address - ^ipspace.name => ipspace - ^location.home_node.name => home_node - ^location.home_port.name => home_port @@ -20,16 +20,17 @@ counters: endpoints: - query: api/private/cli/network/interface counters: + - ^^address => address - ^^lif - ^^vserver => svm - ^data_protocol => protocols export_options: instance_keys: + - address - lif - svm instance_labels: - - address - home_node - home_port - ipspace diff --git a/conf/rest/9.12.0/netroute.yaml b/conf/rest/9.12.0/netroute.yaml index d33f6c22e..1bca860c9 100644 --- a/conf/rest/9.12.0/netroute.yaml +++ b/conf/rest/9.12.0/netroute.yaml @@ -3,21 +3,20 @@ query: api/network/ip/routes object: net_route counters: - - ^^uuid => uuid - - ^destination.address => destination - - ^destination.family => family - - ^destination.netmask => netmask_length - - ^gateway => gateway - - ^interfaces.#.ip.address => interface_address # Added in Ontap 9.9 - - ^interfaces.#.name => interface_name # Added in Ontap 9.9 - - ^ipspace.name => ipspace - - ^scope => scope - - ^svm.name => svm + - ^^uuid => uuid + - ^destination.address => destination + - ^destination.family => family + - ^destination.netmask => netmask_length + - ^gateway => gateway + - ^ipspace.name => ipspace + - ^scope => scope + - ^svm.name => svm + - ^{interfaces.#.name,interfaces.#.ip.address} => interfaces # Added in Ontap 9.9 - hidden_fields: - interfaces plugins: - - NetRoute #Creates net_route_interface_labels from interface_name and interface_address metrics collected above + - NetRoute #Creates net_route_interface_labels from interfaces metrics collected above export_options: instance_keys: diff --git a/conf/rest/9.9.0/volume.yaml b/conf/rest/9.9.0/volume.yaml index 01c062b08..5fe0ecc70 100644 --- a/conf/rest/9.9.0/volume.yaml +++ b/conf/rest/9.9.0/volume.yaml @@ -88,6 +88,16 @@ endpoints: - filter: - privilege_level=diagnostic + - query: api/private/cli/volume/footprint + counters: + - ^^volume + - ^^vserver => svm + - volume_blocks_footprint_bin0 => performance_tier_footprint + - volume_blocks_footprint_bin0_percent => performance_tier_footprint_percent + - volume_blocks_footprint_bin1 => capacity_tier_footprint + - volume_blocks_footprint_bin1_percent => capacity_tier_footprint_percent + + plugins: - Volume: schedule: diff --git a/conf/zapi/cdot/9.10.0/aggr_object_store_config.yaml b/conf/zapi/cdot/9.10.0/aggr_object_store_config.yaml new file mode 100644 index 000000000..7365b7595 --- /dev/null +++ b/conf/zapi/cdot/9.10.0/aggr_object_store_config.yaml @@ -0,0 +1,33 @@ + +name: CloudTarget +query: aggr-object-store-config-get-iter +object: cloud_target + +counters: + aggr-object-store-config-info: + - ^^object-store-uuid => uuid + - ^access-key => access_key + - ^auth-type => authentication_type + - ^ipspace => ipspace + - ^is-certificate-validation-enabled => certificate_validation_enabled + - ^object-store-name => container + - ^port => port + - ^provider-type => provider_type + - ^s3-name => name + - ^server => server + - ^ssl-enabled => ssl_enabled + - used-space => used + +export_options: + instance_keys: + - container + - name + - server + instance_labels: + - access_key + - authentication_type + - certificate_validation_enabled + - ipspace + - port + - provider_type + - ssl_enabled \ No newline at end of file diff --git a/conf/zapi/cdot/9.8.0/lif.yaml b/conf/zapi/cdot/9.8.0/lif.yaml index 7b6e3c021..46dde0b41 100644 --- a/conf/zapi/cdot/9.8.0/lif.yaml +++ b/conf/zapi/cdot/9.8.0/lif.yaml @@ -4,9 +4,9 @@ object: lif counters: net-interface-info: + - ^^address => address - 
^^interface-name => lif - ^^vserver => svm - - ^address => address - ^current-node => node - ^current-port => port - ^home-node => home_node @@ -25,10 +25,10 @@ collect_only_labels: true export_options: instance_keys: + - address - lif - svm instance_labels: - - address - home_node - home_port - ipspace diff --git a/conf/zapi/cdot/9.8.0/svm.yaml b/conf/zapi/cdot/9.8.0/svm.yaml index 1f31da08b..887fd3a7d 100644 --- a/conf/zapi/cdot/9.8.0/svm.yaml +++ b/conf/zapi/cdot/9.8.0/svm.yaml @@ -18,7 +18,7 @@ collect_only_labels: true plugins: - SVM: schedule: - - data: 900s # should be multiple of data poll duration + - data: 15m # should be multiple of data poll duration - LabelAgent: replace: - type root_svm `data` `No` diff --git a/conf/zapi/cdot/9.8.0/volume.yaml b/conf/zapi/cdot/9.8.0/volume.yaml index 5072a1f8b..1298ae3ab 100644 --- a/conf/zapi/cdot/9.8.0/volume.yaml +++ b/conf/zapi/cdot/9.8.0/volume.yaml @@ -72,7 +72,7 @@ counters: plugins: - Volume: schedule: - - data: 900s # should be multiple of data poll duration + - data: 15m # should be multiple of data poll duration - MetricAgent: compute_metric: - inode_used_percent PERCENT inode_files_used inode_files_total diff --git a/conf/zapi/default.yaml b/conf/zapi/default.yaml index abc8c29e7..fd471c83b 100644 --- a/conf/zapi/default.yaml +++ b/conf/zapi/default.yaml @@ -9,6 +9,7 @@ objects: Aggregate: aggr.yaml AggregateEfficiency: aggr_efficiency.yaml CIFSSession: cifs_session.yaml + CloudTarget: aggr_object_store_config.yaml ClusterPeer: clusterpeer.yaml Disk: disk.yaml EmsDestination: ems_destination.yaml @@ -37,4 +38,3 @@ objects: Support: support.yaml SVM: svm.yaml Volume: volume.yaml - diff --git a/container/onePollerPerContainer/Dockerfile b/container/onePollerPerContainer/Dockerfile index f9585b093..af8aa7b01 100644 --- a/container/onePollerPerContainer/Dockerfile +++ b/container/onePollerPerContainer/Dockerfile @@ -12,7 +12,7 @@ ARG ASUP_MAKE_TARGET=build # Set the Current Working Directory inside the container WORKDIR $BUILD_DIR -RUN mkdir -p $INSTALL_DIR $INSTALL_DIR/container/onePollerPerContainer +RUN mkdir -p $INSTALL_DIR $INSTALL_DIR/container/onePollerPerContainer $INSTALL_DIR/container/prometheus $INSTALL_DIR/cert COPY . . 
@@ -24,13 +24,16 @@ fi RUN cp -a $BUILD_DIR/harvest.yml $INSTALL_DIR/harvest.yml.example -RUN cp -aR bin $BUILD_DIR/conf $BUILD_DIR/grafana $BUILD_DIR/autosupport $INSTALL_DIR +RUN cp -aR bin $BUILD_DIR/conf $BUILD_DIR/grafana $BUILD_DIR/autosupport $BUILD_DIR/prom-stack.tmpl $INSTALL_DIR RUN cp -a $BUILD_DIR/container/onePollerPerContainer/docker-compose.tmpl $INSTALL_DIR/container/onePollerPerContainer +RUN cp -aR $BUILD_DIR/container/prometheus $INSTALL_DIR/container/ + FROM gcr.io/distroless/static-debian11:debug ARG INSTALL_DIR=/opt/harvest +ENV HARVEST_DOCKER=yes COPY --from=builder $INSTALL_DIR $INSTALL_DIR WORKDIR $INSTALL_DIR diff --git a/container/onePollerPerContainer/docker-compose.tmpl b/container/onePollerPerContainer/docker-compose.tmpl index 81b782313..9c564f948 100644 --- a/container/onePollerPerContainer/docker-compose.tmpl +++ b/container/onePollerPerContainer/docker-compose.tmpl @@ -7,7 +7,7 @@ services: container_name: {{ .Admin.ContainerName }} restart: unless-stopped ports: - - {{ .Admin.Port }}:{{ .Admin.Port }} + - "{{ .Admin.Port }}:{{ .Admin.Port }}" entrypoint: ["bin/harvest", "admin", "start", "--config", "/opt/harvest.yml"] volumes: - {{ .Admin.ConfigFile }}:/opt/harvest.yml @@ -20,13 +20,12 @@ services: restart: unless-stopped {{- if .ShowPorts}} {{ if .Port }}ports: - - {{ .Port }}:{{ .Port }} + - "{{ .Port }}:{{ .Port }}" {{- end}} {{- end}} command: '--poller {{ .PollerName }} {{if .Port }}--promPort {{ .Port }} {{ end }} {{- if ne .LogLevel 2 }}--loglevel {{ .LogLevel }} {{ end}}--config /opt/harvest.yml' volumes: - - {{ .TemplateDir }}:/opt/harvest/conf - {{ .CertDir }}:/opt/harvest/cert - {{ .ConfigFile }}:/opt/harvest.yml {{- range .Mounts}} diff --git a/docs/assets/extra.css b/docs/assets/extra.css index 087e49381..42625d1f8 100644 --- a/docs/assets/extra.css +++ b/docs/assets/extra.css @@ -1,3 +1,7 @@ .key { color: #1496BB; +} + +.md-typeset h5 { + text-transform: lowercase; } \ No newline at end of file diff --git a/docs/configure-harvest-basic.md b/docs/configure-harvest-basic.md index cf5b5699f..d926daec4 100644 --- a/docs/configure-harvest-basic.md +++ b/docs/configure-harvest-basic.md @@ -13,7 +13,8 @@ All pollers are defined in `harvest.yml`, the main configuration file of Harvest | `exporters` | **required** | List of exporter names from the `Exporters` section. Note: this should be the name of the exporter (e.g. `prometheus1`), not the value of the `exporter` key (e.g. `Prometheus`) | | | `auth_style` | required by Zapi* collectors | Either `basic_auth` or `certificate_auth` See [authentication](#authentication) for details | `basic_auth` | | `username`, `password` | required if `auth_style` is `basic_auth` | | | -| `ssl_cert`, `ssl_key` | optional if `auth_style` is `certificate_auth` | Absolute paths to SSL (client) certificate and key used to authenticate with the target system.
<br />If not provided, the poller will look for `.key` and `.pem` in `$HARVEST_HOME/cert/`.<br />To create certificates for ONTAP systems, see [using certificate authentication](prepare-cdot-clusters.md#using-certificate-authentication) | | +| `ssl_cert`, `ssl_key` | optional if `auth_style` is `certificate_auth` | Paths to SSL (client) certificate and key used to authenticate with the target system.<br />If not provided, the poller will look for `.key` and `.pem` in `$HARVEST_HOME/cert/`.<br />To create certificates for ONTAP systems, see [using certificate authentication](prepare-cdot-clusters.md#using-certificate-authentication) | | +| `ca_cert` | optional if `auth_style` is `certificate_auth` | Path to file that contains PEM encoded certificates. Harvest will append these certificates to the system-wide set of root certificate authorities (CA).<br />If not provided, the OS's root CAs will be used.<br />
To create certificates for ONTAP systems, see [using certificate authentication](prepare-cdot-clusters.md#using-certificate-authentication) | | | `use_insecure_tls` | optional, bool | If true, disable TLS verification when connecting to ONTAP cluster | false | | `credentials_file` | optional, string | Path to a yaml file that contains cluster credentials. The file should have the same shape as `harvest.yml`. See [here](configure-harvest-basic.md#credentials-file) for examples. Path can be relative to `harvest.yml` or absolute. | | | `credentials_script` | optional, section | Section that defines how Harvest should fetch credentials via external script. See [here](configure-harvest-basic.md#credentials-script) for details. | | @@ -62,6 +63,66 @@ Tools: #grafana_api_token: 'aaa-bbb-ccc-ddd' ``` +## Poller_files + +Harvest supports loading pollers from multiple files specified in the `Poller_files` section of your `harvest.yml` file. +For example, the following snippet tells harvest to load pollers from all the `*.yml` files under the `configs` directory, +and from the `path/to/single.yml` file. + +Paths may be relative or absolute. + +```yaml +Poller_files: + - configs/*.yml + - path/to/single.yml + +Pollers: + u2: + datacenter: dc-1 +``` + +Each referenced file can contain one or more unique pollers. +Ensure that you include the top-level `Pollers` section in these files. +All other top-level sections will be ignored. +For example: + +```yaml +# contents of configs/00-rtp.yml +Pollers: + ntap3: + datacenter: rtp + + ntap4: + datacenter: rtp +--- +# contents of configs/01-rtp.yml +Pollers: + ntap5: + datacenter: blr +--- +# contents of path/to/single.yml +Pollers: + ntap1: + datacenter: dc-1 + + ntap2: + datacenter: dc-1 +``` + +At runtime, all files will be read and combined into a single configuration. +The example above would result in the following set of pollers, in this order. +```yaml +- u2 +- ntap3 +- ntap4 +- ntap5 +- ntap1 +- ntap2 +``` + +When using glob patterns, the list of matching paths will be sorted before they are read. +Errors will be logged for all duplicate pollers and Harvest will refuse to start. + ## Configuring collectors Collectors are configured by their own configuration files ([templates](configure-templates.md)), which are stored in subdirectories @@ -175,7 +236,7 @@ At runtime, the `credentials_file` will be read and the included credentials wil matching cluster(s). This is handy when integrating with 3rd party credential stores. -See #884 for examples. +See [#884](https://github.com/NetApp/harvest/discussions/884) for examples. The format of the `credentials_file` is similar to `harvest.yml` and can contain multiple cluster credentials. 
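The `Poller_files` merge semantics documented above (glob patterns expanded in sorted order, only the top-level `Pollers` section read from each file, duplicates rejected) can be sketched in a few lines of Go. The following is a minimal, hypothetical illustration, not Harvest's actual implementation; it also fails on the first duplicate, whereas Harvest logs all duplicates before refusing to start:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"

	"gopkg.in/yaml.v3"
)

// pollerFile keeps only the top-level Pollers section; every other
// top-level section in a referenced file is ignored. Pollers is decoded
// as a yaml.Node so that document order survives (a Go map would lose it).
type pollerFile struct {
	Pollers yaml.Node `yaml:"Pollers"`
}

// mergePollerFiles expands each pattern, reads the matches in sorted
// order, and returns poller names in encounter order, failing on duplicates.
func mergePollerFiles(patterns []string) ([]string, error) {
	seen := make(map[string]bool)
	var order []string
	for _, pattern := range patterns {
		paths, err := filepath.Glob(pattern)
		if err != nil {
			return nil, err
		}
		sort.Strings(paths) // matching paths are sorted before they are read
		for _, p := range paths {
			data, err := os.ReadFile(p)
			if err != nil {
				return nil, err
			}
			var pf pollerFile
			if err := yaml.Unmarshal(data, &pf); err != nil {
				return nil, err
			}
			// A YAML mapping node stores keys and values interleaved.
			for i := 0; i+1 < len(pf.Pollers.Content); i += 2 {
				name := pf.Pollers.Content[i].Value
				if seen[name] {
					return nil, fmt.Errorf("duplicate poller %q in %s", name, p)
				}
				seen[name] = true
				order = append(order, name)
			}
		}
	}
	return order, nil
}

func main() {
	names, err := mergePollerFiles([]string{"configs/*.yml", "path/to/single.yml"})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(names) // with the files shown above: [ntap3 ntap4 ntap5 ntap1 ntap2]
}
```

Pollers from the main `harvest.yml` (such as `u2` above) would be seeded into `seen` and `order` before this merge runs, which is why they come first in the combined list.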
diff --git a/docs/configure-rest.md b/docs/configure-rest.md index 3a77f27e5..f090d3e9e 100644 --- a/docs/configure-rest.md +++ b/docs/configure-rest.md @@ -104,6 +104,50 @@ The Object configuration file ("subtemplate") should contain the following param | `plugins` | list | plugins and their parameters to run on the collected data | | | `export_options` | list | parameters to pass to exporters (see notes below) | | +#### Template Example + +```yaml +name: Volume +query: api/storage/volumes +object: volume + +counters: + - ^^name => volume + - ^^svm.name => svm + - ^aggregates.#.name => aggr + - ^anti_ransomware.state => antiRansomwareState + - ^state => state + - ^style => style + - space.available => size_available + - space.overwrite_reserve => overwrite_reserve_total + - space.overwrite_reserve_used => overwrite_reserve_used + - space.percent_used => size_used_percent + - space.physical_used => space_physical_used + - space.physical_used_percent => space_physical_used_percent + - space.size => size + - space.used => size_used + - hidden_fields: + - anti_ransomware.state + - space + - filter: + - name=*harvest* + +plugins: + - LabelAgent: + exclude_equals: + - style `flexgroup_constituent` + +export_options: + instance_keys: + - aggr + - style + - svm + - volume + instance_labels: + - antiRansomwareState + - state +``` + #### `counters` This section defines the list of counters that will be collected. These counters can be labels, numeric metrics or @@ -113,6 +157,24 @@ The display name of a counter can be changed with `=>` (e.g., `space.block_stora Counters that are stored as labels will only be exported if they are included in the `export_options` section. +The `counters` section allows you to specify `hidden_fields` and `filter` parameters. Both are explained in detail below. + +##### `hidden_fields` + +There are some fields that ONTAP will not return unless you explicitly ask for them, even when using the URL parameter `fields=**`. `hidden_fields` is how you tell ONTAP which additional fields it should include in the REST response. + +##### `filter` + +The `filter` is used to constrain the data returned by the endpoint, allowing for more targeted data retrieval. The filtering uses ONTAP's REST record filtering. The example above asks ONTAP to only return records where a volume's name matches `*harvest*`. + +In ONTAP's REST record-filtering syntax, the filter in the [example](#template-example) above becomes `name=*harvest*` and is appended to the final URL like so: + +``` +https://CLUSTER_IP/api/storage/volumes?fields=*,anti_ransomware.state,space&name=*harvest* +``` + +Refer to the `query parameters` and `record filtering` sections of the ONTAP API specification for more details. + #### `export_options` Parameters in this section tell the exporters how to handle the collected data. The set of parameters varies by diff --git a/docs/configure-templates.md b/docs/configure-templates.md index 2e348b50d..8be917082 100644 --- a/docs/configure-templates.md +++ b/docs/configure-templates.md @@ -178,7 +178,7 @@ sensor_value{datacenter="WDRF",cluster="shopfloor",node="shopfloor-02",sensor="P ## Extend an existing object template -### How to extend a Rest/RestPerf/Ems collector's existing object template +### How to extend a Rest/RestPerf/StorageGRID/Ems collector's existing object template Instead of editing one of the existing templates, it's better to copy one and edit the copy. That way, your custom template will not be overwritten when upgrading Harvest.
For example, if you want to diff --git a/docs/configure-zapi.md b/docs/configure-zapi.md index a996c6c0a..5cd47f3e1 100644 --- a/docs/configure-zapi.md +++ b/docs/configure-zapi.md @@ -127,16 +127,16 @@ configuration (explained in the next section). Additionally, this file contains the parameters that are applied as defaults to all objects. (As mentioned before, any of these parameters can be defined in the Harvest or object configuration files as well). -| parameter | type | description | default | -|--------------------|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------| -| `use_insecure_tls` | bool, optional | skip verifying TLS certificate of the target system | `false` | -| `client_timeout` | duration (Go-syntax) | how long to wait for server responses | 30s | -| `batch_size` | int, optional | max instances per API request | `500` | -| `latency_io_reqd` | int, optional | threshold of IOPs for calculating latency metrics (latencies based on very few IOPs are unreliable) | `100` | -| `schedule` | list, required | the poll frequencies of the collector/object, should include exactly these three elements in the exact same other: | | -| - `counter` | duration (Go-syntax) | poll frequency of updating the counter metadata cache (example value: `1200s` = `20m`) | | -| - `instance` | duration (Go-syntax) | poll frequency of updating the instance cache (example value: `600s` = `10m`) | | -| - `data` | duration (Go-syntax) | poll frequency of updating the data cache (example value: `60s` = `1m`)
<br /><br />**Note** Harvest allows defining poll intervals on sub-second level (e.g. `1ms`), however keep in mind the following:<br />• API response of an ONTAP system can take several seconds, so the collector is likely to enter failed state if the poll interval is less than `client_timeout`.<br />• Small poll intervals will create significant workload on the ONTAP system, as many counters are aggregated on-demand.<br />• Some metric values become less significant if they are calculated for very short intervals (e.g. latencies)
| | +| parameter | type | description | default | +|--------------------|----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------| +| `use_insecure_tls` | bool, optional | skip verifying TLS certificate of the target system | `false` | +| `client_timeout` | duration (Go-syntax) | how long to wait for server responses | 30s | +| `batch_size` | int, optional | max instances per API request | `500` | +| `latency_io_reqd` | int, optional | threshold of IOPs for calculating latency metrics (latencies based on very few IOPs are unreliable) | `100` | +| `schedule` | list, required | the poll frequencies of the collector/object, should include exactly these three elements in the exact same other: | | +| - `counter` | duration (Go-syntax) | poll frequency of updating the counter metadata cache (example value: `20m`) | | +| - `instance` | duration (Go-syntax) | poll frequency of updating the instance cache (example value: `10m`) | | +| - `data` | duration (Go-syntax) | poll frequency of updating the data cache (example value: `1m`)
<br /><br />**Note** Harvest allows defining poll intervals on sub-second level (e.g. `1ms`), however keep in mind the following:<br />• API response of an ONTAP system can take several seconds, so the collector is likely to enter failed state if the poll interval is less than `client_timeout`.<br />• Small poll intervals will create significant workload on the ONTAP system, as many counters are aggregated on-demand.<br />• Some metric values become less significant if they are calculated for very short intervals (e.g. latencies)
| | The template should define objects in the `objects` section. Example: diff --git a/docs/help/troubleshooting.md b/docs/help/troubleshooting.md index dcc37ae83..e5d41236a 100644 --- a/docs/help/troubleshooting.md +++ b/docs/help/troubleshooting.md @@ -105,8 +105,8 @@ There are a few parameters that you can change to avoid this from happening. You Increase the `client_timeout` value by adding a `client_timeout` line at the beginning of the template, like so: ```yaml -# increase the timeout to 60 seconds -client_timeout: 60s +# increase the timeout to 1 minute +client_timeout: 1m ``` #### `batch_size` @@ -120,24 +120,24 @@ batch_size: 200 #### `schedule` -If nothing else helps, you can increase the data poll interval of the collector (default is `60s` for ZapiPerf and `180s` for Zapi). You can do this either by adding a `schedule` attribute to the template or, if it already exists, by changing the `- data` line. +If nothing else helps, you can increase the data poll interval of the collector (default is `1m` for ZapiPerf and `3m` for Zapi). You can do this either by adding a `schedule` attribute to the template or, if it already exists, by changing the `- data` line. Example for ZapiPerf: ```yaml # increase data poll frequency to 2 minutes schedule: - - counter: 1200s - - instance: 600s - - data: 120s + - counter: 20m + - instance: 10m + - data: 2m ``` Example for Zapi: ```yaml # increase data poll frequency to 5 minutes schedule: - - instance: 600s - - data: 300s + - instance: 10m + - data: 5m ``` ## Prometheus HTTP Service Discovery doesn't work diff --git a/docs/install/containerd.md b/docs/install/containerd.md index 7b5532287..0f6668428 100644 --- a/docs/install/containerd.md +++ b/docs/install/containerd.md @@ -47,8 +47,10 @@ Create your `harvest-compose.yml` file like this: ```sh docker run --rm \ --entrypoint "bin/harvest" \ - --volume "$(pwd):/opt/harvest" \ - ghcr.io/netapp/harvest generate docker full \ + --volume "$(pwd):/opt/temp" \ + --volume "$(pwd)/harvest.yml:/opt/harvest/harvest.yml" \ + ghcr.io/netapp/harvest \ + generate docker full \ --output harvest-compose.yml # --image tag, if you built a new image above ``` diff --git a/docs/install/containers.md b/docs/install/containers.md index 9b8e90571..dc9930906 100644 --- a/docs/install/containers.md +++ b/docs/install/containers.md @@ -5,9 +5,8 @@ Harvest is container-ready and supports several deployment options: - [Stand-up Prometheus, Grafana, and Harvest via Docker Compose](#docker-compose). Choose this if you want to hit the ground running. Install, volume and network mounts automatically handled. -- [Poller-per-container model](https://github.com/NetApp/harvest/tree/main/container/onePollerPerContainer) that offers - more flexibility in configuration. This deployment enables a broad range of orchestrators (Nomad, Mesosphere, Swarm, - K8, etc.) since you pick-and-choose what gets built and how it's deployed, stronger familiarity with containers is +- [Stand-up Harvest via Docker Compose](harvest-containers.md) that offers + more flexibility in configuration. Choose this if you only want to run Harvest containers. Since you pick-and-choose what gets built and how it's deployed, stronger familiarity with containers is recommended. - If you prefer Ansible, David Blackwell created @@ -15,10 +14,10 @@ Harvest is container-ready and supports several deployment options: stands up Harvest, Grafana, and Prometheus. 
- Want to run Harvest on a Mac - via [containerd and Racher Desktop](https://github.com/NetApp/harvest/tree/main/container/containerd)? We got you + via [containerd and Rancher Desktop](containerd.md)? We got you covered. -- [K8 Deployment](https://github.com/NetApp/harvest/blob/main/container/k8/README.md) via Kompose +- [K8 Deployment](k8.md) via Kompose ## Docker Compose @@ -29,11 +28,6 @@ This is a quick way to install and get started with Harvest. Follow the four ste - A separate poller container is created for each monitored cluster - All pollers are automatically added as Prometheus scrape targets -### Download and untar - -- Download the latest version of [Harvest](https://netapp.github.io/harvest/latest/install/native/), untar, and - cd into the harvest directory. - ### Setup harvest.yml - Create a `harvest.yml` file with your cluster details, below is an example with annotated comments. Modify as needed @@ -73,20 +67,39 @@ Pollers: - Generate a Docker compose file from your `harvest.yml` -``` +```sh docker run --rm \ --entrypoint "bin/harvest" \ - --volume "$(pwd):/opt/harvest" \ - ghcr.io/netapp/harvest generate docker full \ + --volume "$(pwd):/opt/temp" \ + --volume "$(pwd)/harvest.yml:/opt/harvest/harvest.yml" \ + ghcr.io/netapp/harvest \ + generate docker full \ --output harvest-compose.yml ``` +By default, the above command uses the Harvest configuration file (`harvest.yml`) located in the current directory. If you want to use a Harvest config from a different location, see the twisty below. +??? question "What if my harvest configuration file is somewhere else or not named harvest.yml" Use the following docker run command, updating the `HYML` variable with the absolute path to your `harvest.yml`. + + ```sh + HYML="/opt/custom_harvest.yml" \ + docker run --rm \ + --entrypoint "bin/harvest" \ + --volume "$(pwd):/opt/temp" \ + --volume "${HYML}:${HYML}" \ + ghcr.io/netapp/harvest:latest \ + generate docker full \ + --output harvest-compose.yml \ + --config "${HYML}" + ``` + `generate docker full` does two things: 1. Creates a Docker compose file with a container for each Harvest poller defined in your `harvest.yml` 2. Creates a matching Prometheus service discovery file for each Harvest poller (located in `container/prometheus/harvest_targets.yml`). Prometheus uses this file to scrape the Harvest pollers. + ### Start everything Bring everything up :rocket: @@ -100,7 +113,7 @@ docker-compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans The `prom-stack.yml` compose file creates a `frontend` and `backend` network. Prometheus and Grafana publish their admin ports on the front-end network and are routable to the local machine. By default, the Harvest pollers are part of the backend network and also expose their Prometheus web end-points. -If you do not want their end-points exposed, remove the `--port` option from the `generate` sub-command in the [previous step](#generate-a-docker-compose-for-your-pollers). +If you do not want their end-points exposed, add the `--port=false` option to the `generate` sub-command in the [previous step](#generate-a-docker-compose-for-your-pollers). ### Prometheus @@ -162,15 +175,15 @@ Note: Deleting or stopping Docker containers does not remove the data stored in To upgrade Harvest: -1. Download the latest `tar.gz` or packaged version and install it. - This is needed since the new version may contain new templates, dashboards, or other files not included in the Docker +1.
Retrieve the most recent version of the Harvest Docker image by executing the following command.This is needed since the new version may contain new templates, dashboards, or other files not included in the Docker image. + ``` + docker pull ghcr.io/netapp/harvest + ``` 2. [Stop all containers](#stop-all-containers) -3. Copy your existing `harvest.yml` into the new Harvest directory created in step #1. - -4. Regenerate your `harvest-compose.yml` file by +3. Regenerate your `harvest-compose.yml` file by running [harvest generate](#generate-a-docker-compose-for-your-pollers) By default, generate will use the `latest` tag. If you want to upgrade to a `nightly` build see the twisty. @@ -178,11 +191,19 @@ To upgrade Harvest: Tell the `generate` cmd to use a different tag like so: - `docker run --rm --entrypoint "bin/harvest" --volume "$(pwd):/opt/harvest" ghcr.io/netapp/harvest:nightly generate docker full --output harvest-compose.yml` + ```sh + docker run --rm \ + --entrypoint "bin/harvest" \ + --volume "$(pwd):/opt/temp" \ + --volume "$(pwd)/harvest.yml:/opt/harvest/harvest.yml" \ + ghcr.io/netapp/harvest:nightly \ + generate docker full \ + --image ghcr.io/netapp/harvest:nightly \ + --output harvest-compose.yml + ``` -5. Pull new images and restart your containers like so: +4. Restart your containers using the following: ``` -docker pull ghcr.io/netapp/harvest # or if using Docker Hub: docker pull rahulguptajss/harvest docker-compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans ``` diff --git a/docs/install/harvest-containers.md b/docs/install/harvest-containers.md new file mode 100644 index 000000000..78e6675e8 --- /dev/null +++ b/docs/install/harvest-containers.md @@ -0,0 +1,114 @@ +Follow this method if your goal is to establish a separate harvest container for each poller defined in `harvest.yml` file. Please note that these containers must be incorporated into your current infrastructure, which might include systems like Prometheus or Grafana. + +### Setup harvest.yml + +- Create a `harvest.yml` file with your cluster details, below is an example with annotated comments. Modify as needed + for your scenario. + +This config is using the Prometheus +exporter [port_range](../prometheus-exporter.md#port_range) +feature, so you don't have to manage the Prometheus exporter port mappings for each poller. + +``` +Exporters: + prometheus1: + exporter: Prometheus + addr: 0.0.0.0 + port_range: 2000-2030 # <====== adjust to be greater than equal to the number of monitored clusters + +Defaults: + collectors: + - Zapi + - ZapiPerf + - EMS + use_insecure_tls: true # <====== adjust as needed to enable/disable TLS checks + exporters: + - prometheus1 + +Pollers: + infinity: # <====== add your cluster(s) here, they use the exporter defined three lines above + datacenter: DC-01 + addr: 10.0.1.2 + auth_style: basic_auth + username: user + password: 123#abc + # next cluster .... +``` + +### Generate a Docker compose for your Pollers + +- Generate a Docker compose file from your `harvest.yml` + +```sh +docker run --rm \ + --entrypoint "bin/harvest" \ + --volume "$(pwd):/opt/temp" \ + --volume "$(pwd)/harvest.yml:/opt/harvest/harvest.yml" \ + ghcr.io/netapp/harvest \ + generate docker \ + --output harvest-compose.yml +``` + +### Start everything + +Bring everything up :rocket: + +``` +docker-compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans +``` + + +## Manage pollers + +### How do I add a new poller? + +1. Add poller to `harvest.yml` +2. 
Regenerate compose file by running [harvest generate](#generate-a-docker-compose-for-your-pollers) +3. Run [docker compose up](#start-everything), for example, + +```bash +docker-compose -f harvest-compose.yml up -d --remove-orphans +``` + +### Stop all containers + +``` +docker-compose -f harvest-compose.yml down +``` + +### Upgrade Harvest + +To upgrade Harvest: + +1. Retrieve the most recent version of the Harvest Docker image by executing the following command. This is needed since the new version may contain new templates, dashboards, or other files not included in the Docker + image. + ``` + docker pull ghcr.io/netapp/harvest + ``` + +2. [Stop all containers](#stop-all-containers) + +3. Regenerate your `harvest-compose.yml` file by + running [harvest generate](#generate-a-docker-compose-for-your-pollers) + By default, generate will use the `latest` tag. If you want to upgrade to a `nightly` build see the twisty. + + ??? question "I want to upgrade to a nightly build" + + Tell the `generate` cmd to use a different tag like so: + + ```sh + docker run --rm \ + --entrypoint "bin/harvest" \ + --volume "$(pwd):/opt/temp" \ + --volume "$(pwd)/harvest.yml:/opt/harvest/harvest.yml" \ + ghcr.io/netapp/harvest:nightly \ + generate docker \ + --image ghcr.io/netapp/harvest:nightly \ + --output harvest-compose.yml + ``` + +4. Restart your containers using the following: + + ``` + docker-compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans + ``` diff --git a/docs/install/k8.md b/docs/install/k8.md index 83c921798..3d6cadba6 100644 --- a/docs/install/k8.md +++ b/docs/install/k8.md @@ -5,11 +5,6 @@ The following steps are provided for reference purposes only. Depending on the s ### Requirements - [Kompose](https://github.com/kubernetes/kompose/): `v1.25` or higher -### Download and untar Harvest - -- Download the latest version of [Harvest](https://netapp.github.io/harvest/latest/install/native/), untar, and - cd into the harvest directory. - ## Deployment * [Local k8 Deployment](#local-k8-deployment) @@ -24,8 +19,10 @@ To run Harvest resources in Kubernetes, please execute the following commands: ``` docker run --rm \ --entrypoint "bin/harvest" \ - --volume "$(pwd):/opt/harvest" \ - ghcr.io/netapp/harvest generate docker full \ + --volume "$(pwd):/opt/temp" \ + --volume "$(pwd)/harvest.yml:/opt/harvest/harvest.yml" \ + ghcr.io/netapp/harvest \ + generate docker full \ --output harvest-compose.yml ``` @@ -411,8 +408,10 @@ Please note the following assumptions for the steps below: ``` docker run --rm \ --entrypoint "bin/harvest" \ - --volume "$(pwd):/opt/harvest" \ - ghcr.io/netapp/harvest generate docker full \ + --volume "$(pwd):/opt/temp" \ + --volume "$(pwd)/harvest.yml:/opt/harvest/harvest.yml" \ + ghcr.io/netapp/harvest \ + generate docker full \ --output harvest-compose.yml sed -i '/\/conf/s/^/#/g' harvest-compose.yml diff --git a/docs/install/podman.md b/docs/install/podman.md index e05b67bf9..4d4d4b805 100644 --- a/docs/install/podman.md +++ b/docs/install/podman.md @@ -77,9 +77,26 @@ podman info | grep runRoot By default, Cockpit runs on port 9090, same as Prometheus. We'll change Prometheus's host port to 9091 so we can run both Cockpit and Prometheus. Line `2` below does that. With these changes, the [standard Harvest compose instructions](../containers/#docker-compose) can be followed as normal now. In summary, + 1. Add the clusters, exporters, etc. to your `harvest.yml` file -2.
Generate a compose file from your `harvest.yml` by running `docker run --rm --entrypoint "bin/harvest" --volume "$(pwd):/opt/harvest" ghcr.io/netapp/harvest generate docker full --output harvest-compose.yml --promPort 9091` -3. Bring everything up with `docker-compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans` +2. Generate a compose file from your `harvest.yml` by running + + ```sh + docker run --rm \ + --entrypoint "bin/harvest" \ + --volume "$(pwd):/opt/temp" \ + --volume "$(pwd)/harvest.yml:/opt/harvest/harvest.yml" \ + ghcr.io/netapp/harvest \ + generate docker full \ + --output harvest-compose.yml \ + --promPort 9091 + ``` + +3. Bring everything up :rocket: + + ``` + docker-compose -f prom-stack.yml -f harvest-compose.yml up -d --remove-orphans + ``` After starting the containers, you can view them with `podman ps -a` or using Cockpit `https://host-ip:9090/podman`. diff --git a/docs/prometheus-exporter.md b/docs/prometheus-exporter.md index 90cc14d0a..aab006688 100644 --- a/docs/prometheus-exporter.md +++ b/docs/prometheus-exporter.md @@ -41,7 +41,7 @@ An overview of all parameters: | `global_prefix` | string, optional | add a prefix to all metrics (e.g. `netapp_`) | | | `allow_addrs` | list of strings, optional | allow access only if host matches any of the provided addresses | | | `allow_addrs_regex` | list of strings, optional | allow access only if host address matches at least one of the regular expressions | | -| `cache_max_keep` | string (Go duration format), optional | maximum amount of time metrics are cached (in case Prometheus does not timely collect the metrics) | `300s` | +| `cache_max_keep` | string (Go duration format), optional | maximum amount of time metrics are cached (in case Prometheus does not timely collect the metrics) | `5m` | | `add_meta_tags` | bool, optional | add `HELP` and `TYPE` [metatags](https://prometheus.io/docs/instrumenting/exposition_formats/#comments-help-text-and-type-information) to metrics (currently no useful information, but required by some tools) | `false` | | `sort_labels` | bool, optional | sort metric labels before exporting. Some [open-metrics scrapers report](https://github.com/NetApp/harvest/issues/756) stale metrics when labels are not sorted. | `false` | | `tls` | `tls` | optional | If present, enables TLS transport. If running in a container, see [note](https://github.com/NetApp/harvest/issues/672#issuecomment-1036338589) | @@ -295,7 +295,7 @@ Scroll down to near the end of file and add the following lines: ``` **NOTE** If Prometheus is not on the same machine as Harvest, then replace `localhost` with the IP address of your -Harvest machine. Also note the scrape interval above is set to 60s. That matches the polling frequency of the default +Harvest machine. Also note the scrape interval above is set to 1m. That matches the polling frequency of the default Harvest collectors. If you change the polling frequency of a Harvest collector to a lower value, you should also change the scrape interval. 
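An aside on the `300s` to `5m` and `60s` to `1m` rewrites that appear throughout this diff: these values use Go's duration syntax (as the tables above note), so both spellings parse to the identical value and the rewrites are purely cosmetic. A quick check:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "300s" and "5m" parse to the same Go duration, which is why the
	// docs can swap one spelling for the other without changing behavior.
	a, _ := time.ParseDuration("300s")
	b, _ := time.ParseDuration("5m")
	fmt.Println(a == b, a) // true 5m0s
}
```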
diff --git a/docs/resources/power-algorithm.md b/docs/resources/power-algorithm.md new file mode 100644 index 000000000..e90c4eb70 --- /dev/null +++ b/docs/resources/power-algorithm.md @@ -0,0 +1,143 @@ + +Gathering power metrics requires a cluster with: + +* ONTAP versions 9.6+ +* [REST enabled](../prepare-cdot-clusters.md), even when using the ZAPI collector + +REST is required because it is the only way to collect chassis field-replaceable-unit (FRU) information via the +REST API `/api/private/cli/system/chassis/fru`. + +## How does Harvest calculate cluster power? + +Cluster power is the sum of a cluster's [node(s) power](#node-power) + +the sum of attached [disk shelve(s) power](#disk-shelf-power). + +Redundant power supplies (PSU) load-share the total load. +With n PSUs, each PSU does roughly (1/n) the work +(the actual amount is slightly more than a single PSU due to additional fans.) + +## Node power + +Node power is calculated by collecting power supply unit (PSU) power, as reported by REST +`/api/private/cli/system/environment/sensors` or by ZAPI `environment-sensors-get-iter`. + +When a power supply is shared between controllers, +the PSU's power will be evenly divided across the controllers due to load-sharing. + +For example: + +* FAS2750 models have two power supplies that power both controllers. Each PSU is shared between the two controllers. +* A800 models have four power supplies. `PSU1` and `PSU2` power `Controller1` and `PSU3` and `PSU4` power `Controller2`. Each PSU provides power to a single controller. + +Harvest determines whether a PSU is shared between controllers by consulting the `connected_nodes` of each PSU, +as reported by ONTAP via `/api/private/cli/system/chassis/fru` + +## Disk shelf power + +Disk shelf power is calculated by collecting `psu.power_drawn`, as reported by REST, via +`/api/storage/shelves` or `sensor-reading`, as reported by ZAPI `storage-shelf-info-get-iter`. + +The power for [embedded shelves](https://kb.netapp.com/onprem/ontap/hardware/FAQ%3A_How_do_shelf_product_IDs_and_modules_in_ONTAP_map_to_a_model_of_a_shelf_or_storage_system_with_embedded_storage) +is ignored, since that power is already accounted for in the controller's power draw. 
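To make the shared-PSU arithmetic described above concrete, here is a small illustrative Go sketch. The types, field names, and wattages are hypothetical (this is not Harvest's implementation); each PSU's draw is split evenly across the nodes listed in its `connected_nodes`:

```go
package main

import "fmt"

// psu is a hypothetical stand-in for one power supply reading plus the
// connected_nodes reported by /api/private/cli/system/chassis/fru.
type psu struct {
	name           string
	watts          float64 // e.g. sum of volts * amps from the sensor readings
	connectedNodes []string
}

// nodePower splits each PSU's draw evenly across its connected nodes,
// mirroring the load-sharing division described above.
func nodePower(psus []psu) map[string]float64 {
	power := make(map[string]float64)
	for _, p := range psus {
		share := p.watts / float64(len(p.connectedNodes))
		for _, node := range p.connectedNodes {
			power[node] += share
		}
	}
	return power
}

func main() {
	// FAS2750-style: both PSUs are shared by the two controllers, so each
	// controller is charged half of each PSU. An A800-style PSU would list
	// a single connected node and be charged to it in full.
	psus := []psu{
		{name: "PSU1", watts: 260, connectedNodes: []string{"node-01", "node-02"}},
		{name: "PSU2", watts: 252, connectedNodes: []string{"node-01", "node-02"}},
	}
	fmt.Println(nodePower(psus)) // map[node-01:256 node-02:256]
}
```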
+ +## Examples + +### FAS2750 + +``` +# Power Metrics for 10.61.183.200 + +## ONTAP version NetApp Release 9.8P16: Fri Dec 02 02:05:05 UTC 2022 + +## Nodes +system show + Node | Model | SerialNumber +----------------------+---------+--------------- +cie-na2750-g1344-01 | FAS2750 | 621841000123 +cie-na2750-g1344-02 | FAS2750 | 621841000124 + +## Chassis +system chassis fru show + ChassisId | Name | Fru | Type | Status | NumNodes | ConnectedNodes +---------------+-----------------+---------------------+------------+--------+----------+------------------------------------------- +021827030435 | 621841000123 | cie-na2750-g1344-01 | controller | ok | 1 | cie-na2750-g1344-01 +021827030435 | 621841000124 | cie-na2750-g1344-02 | controller | ok | 1 | cie-na2750-g1344-02 +021827030435 | PSQ094182201794 | PSU2 FRU | psu | ok | 2 | cie-na2750-g1344-02, cie-na2750-g1344-01 +021827030435 | PSQ094182201797 | PSU1 FRU | psu | ok | 2 | cie-na2750-g1344-02, cie-na2750-g1344-01 + +## Sensors +system environment sensors show +(filtered by power, voltage, current) + Node | Name | Type | State | Value | Units +----------------------+---------------+---------+--------+-------+-------- +cie-na2750-g1344-01 | PSU1 12V Curr | current | normal | 9920 | mA +cie-na2750-g1344-01 | PSU1 12V | voltage | normal | 12180 | mV +cie-na2750-g1344-01 | PSU1 5V Curr | current | normal | 4490 | mA +cie-na2750-g1344-01 | PSU1 5V | voltage | normal | 5110 | mV +cie-na2750-g1344-01 | PSU2 12V Curr | current | normal | 9140 | mA +cie-na2750-g1344-01 | PSU2 12V | voltage | normal | 12100 | mV +cie-na2750-g1344-01 | PSU2 5V Curr | current | normal | 4880 | mA +cie-na2750-g1344-01 | PSU2 5V | voltage | normal | 5070 | mV +cie-na2750-g1344-02 | PSU1 12V Curr | current | normal | 9920 | mA +cie-na2750-g1344-02 | PSU1 12V | voltage | normal | 12180 | mV +cie-na2750-g1344-02 | PSU1 5V Curr | current | normal | 4330 | mA +cie-na2750-g1344-02 | PSU1 5V | voltage | normal | 5110 | mV +cie-na2750-g1344-02 | PSU2 12V Curr | current | normal | 9170 | mA +cie-na2750-g1344-02 | PSU2 12V | voltage | normal | 12100 | mV +cie-na2750-g1344-02 | PSU2 5V Curr | current | normal | 4720 | mA +cie-na2750-g1344-02 | PSU2 5V | voltage | normal | 5070 | mV + +## Shelf PSUs +storage shelf show +Shelf | ProductId | ModuleType | PSUId | PSUIsEnabled | PSUPowerDrawn | Embedded +------+-----------+------------+-------+--------------+---------------+--------- + 1.0 | DS224-12 | iom12e | 1,2 | true,true | 1397,1318 | true + +### Controller Power From Sum(InVoltage * InCurrent)/NumNodes +Power: 256W +``` + +### AFF A800 + +``` +# Power Metrics for 10.61.124.110 + +## ONTAP version NetApp Release 9.13.1P1: Tue Jul 25 10:19:28 UTC 2023 + +## Nodes +system show + Node | Model | SerialNumber +----------+----------+------------- +a800-1-01 | AFF-A800 | 941825000071 +a800-1-02 | AFF-A800 | 941825000072 + +## Chassis +system chassis fru show + ChassisId | Name | Fru | Type | Status | NumNodes | ConnectedNodes +----------------+----------------+-----------+------------+--------+----------+--------------- +SHFFG1826000154 | 941825000071 | a800-1-01 | controller | ok | 1 | a800-1-01 +SHFFG1826000154 | 941825000072 | a800-1-02 | controller | ok | 1 | a800-1-02 +SHFFG1826000154 | EEQT1822002800 | PSU1 FRU | psu | ok | 1 | a800-1-02 +SHFFG1826000154 | EEQT1822002804 | PSU2 FRU | psu | ok | 1 | a800-1-02 +SHFFG1826000154 | EEQT1822002805 | PSU2 FRU | psu | ok | 1 | a800-1-01 +SHFFG1826000154 | EEQT1822002806 | PSU1 FRU | psu | ok | 1 | a800-1-01 + +## Sensors +system environment 
sensors show +(filtered by power, voltage, current) + Node | Name | Type | State | Value | Units +----------+---------------+---------+--------+-------+------ +a800-1-01 | PSU1 Power In | unknown | normal | 376 | W +a800-1-01 | PSU2 Power In | unknown | normal | 411 | W +a800-1-02 | PSU1 Power In | unknown | normal | 383 | W +a800-1-02 | PSU2 Power In | unknown | normal | 433 | W + +## Shelf PSUs +storage shelf show +Shelf | ProductId | ModuleType | PSUId | PSUIsEnabled | PSUPowerDrawn | Embedded +------+-------------+------------+-------+--------------+---------------+--------- + 1.0 | FS4483PSM3E | psm3e | | | | true + +### Controller Power From Sum(InPower sensors) +Power: 1603W +``` \ No newline at end of file diff --git a/go.mod b/go.mod index adddf84e0..580670dd8 100644 --- a/go.mod +++ b/go.mod @@ -8,22 +8,23 @@ require ( github.com/go-openapi/spec v0.20.9 github.com/hashicorp/go-version v1.6.0 github.com/olekukonko/tablewriter v0.0.5 - github.com/rs/zerolog v1.30.0 - github.com/shirou/gopsutil/v3 v3.23.7 + github.com/rs/zerolog v1.31.0 + github.com/shirou/gopsutil/v3 v3.23.9 github.com/spf13/cobra v1.7.0 - github.com/tidwall/gjson v1.16.0 + github.com/tidwall/gjson v1.17.0 github.com/tidwall/pretty v1.2.1 github.com/tidwall/sjson v1.2.5 github.com/zekroTJA/timedmap v1.5.1 - golang.org/x/sys v0.11.0 - golang.org/x/term v0.11.0 - golang.org/x/text v0.12.0 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 + golang.org/x/sys v0.12.0 + golang.org/x/term v0.12.0 + golang.org/x/text v0.13.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.20.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.4 // indirect @@ -33,14 +34,14 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tidwall/match v1.1.1 // indirect - github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect ) diff --git a/go.sum b/go.sum index d2202a457..edbdeb794 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,9 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer 
v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= @@ -51,16 +52,14 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= @@ -76,11 +75,11 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= -github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v3 v3.23.7 h1:C+fHO8hfIppoJ1WdsVm1RoI0RwXoNdfTK7yWXV0wVj4= -github.com/shirou/gopsutil/v3 v3.23.7/go.mod h1:c4gnmoRC0hQuaLqvxnx1//VXQ0Ms/X9UnJF8pddY5z4= +github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E= +github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -102,10 +101,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.15.0 h1:5n/pM+v3r5ujuNl4YLZLsQ+UE5jlkLVm7jMzT5Mpolw= -github.com/tidwall/gjson v1.15.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg= -github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= +github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= @@ -113,30 +110,29 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zekroTJA/timedmap v1.5.1 h1:s9SI1T8gl1OAfDw9LKZYMfbhNqqCIOhZTfhsHgpKHMw= github.com/zekroTJA/timedmap v1.5.1/go.mod h1:Go4uPxMN1Wjl5IgO6HYD1tM9IQhkYEVqcrrdsI4ljXo= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/grafana/dashboards/7mode/lun7.json b/grafana/dashboards/7mode/lun7.json index 32357d25e..75c5519db 100644 --- a/grafana/dashboards/7mode/lun7.json +++ b/grafana/dashboards/7mode/lun7.json @@ -1286,7 +1286,7 @@ "type": "table" } ], - "title": "LUN Table Drilldown", + "title": "LUN Table", "type": "row" }, { @@ -1849,7 +1849,7 @@ "type": "timeseries" } ], - "title": "Top LUN Performance Drilldown", + "title": "Top LUN Performance", "type": "row" }, { @@ -2068,7 +2068,7 @@ "type": "timeseries" } ], - "title": "Top LUN Performance Efficiency Drilldown", + "title": "Top LUN Performance Efficiency", "type": "row" }, { @@ -2352,7 +2352,7 @@ "type": "timeseries" } ], - "title": "Top Volume and LUN Capacity Drilldown", + "title": "Top Volume and LUN Capacity", "type": "row" }, { @@ -2743,7 +2743,7 @@ "type": "timeseries" } ], - "title": "Per LUN Drilldown (Must Select Node/SVM/Volume/LUN)", + "title": "Per LUN (Must Select Node/SVM/Volume/LUN)", "type": "row" } ], diff --git a/grafana/dashboards/7mode/network7.json b/grafana/dashboards/7mode/network7.json index 7073c8e44..bfa68d734 100644 --- a/grafana/dashboards/7mode/network7.json +++ b/grafana/dashboards/7mode/network7.json @@ -1352,7 +1352,7 @@ "type": "timeseries" } ], - "title": "Ethernet Drilldown", + "title": "Ethernet", "type": "row" }, { @@ -1975,7 +1975,7 @@ "type": "timeseries" } ], - "title": "FibreChannel Drilldown", + "title": "FibreChannel", "type": "row" } ], diff --git a/grafana/dashboards/7mode/node7.json b/grafana/dashboards/7mode/node7.json index e356ad4c5..b237f8fdc 100644 --- a/grafana/dashboards/7mode/node7.json +++ b/grafana/dashboards/7mode/node7.json @@ -1520,7 +1520,7 @@ "type": "timeseries" } ], - "title": "Backend Drilldown", + "title": "Backend", "type": "row" }, { @@ -1636,7 +1636,7 @@ "type": "timeseries" } ], - "title": "CPU Layer Drilldown", + "title": "CPU Layer", "type": "row" }, { @@ -2015,7 +2015,7 @@ "type": "timeseries" } ], - "title": "Network Layer Drilldown", + "title": "Network Layer", "type": "row" }, { @@ -2243,7 +2243,7 @@ "type": "timeseries" } ], - "title": "CIFS Frontend Drilldown", + "title": "CIFS Frontend", "type": "row" }, { @@ -2794,7 +2794,7 @@ "type": "timeseries" } ], - "title": "NFSv3 Frontend Drilldown", + "title": "NFSv3 Frontend", "type": "row" }, { @@ -3112,7 +3112,7 @@ "type": "timeseries" } ], - "title": "iSCSI Frontend Drilldown", + "title": "iSCSI Frontend", "type": "row" } ], 
diff --git a/grafana/dashboards/7mode/volume7.json b/grafana/dashboards/7mode/volume7.json index dc52f6477..0d69f7354 100644 --- a/grafana/dashboards/7mode/volume7.json +++ b/grafana/dashboards/7mode/volume7.json @@ -1506,7 +1506,7 @@ "type": "table" } ], - "title": "Volume Table Drilldown", + "title": "Volume Table", "type": "row" }, { @@ -2066,7 +2066,7 @@ "type": "timeseries" } ], - "title": "Backend WAFL Drilldown", + "title": "Backend WAFL", "type": "row" }, { diff --git a/grafana/dashboards/cmode/aggregate.json b/grafana/dashboards/cmode/aggregate.json index 205142dba..34730c409 100644 --- a/grafana/dashboards/cmode/aggregate.json +++ b/grafana/dashboards/cmode/aggregate.json @@ -3913,7 +3913,7 @@ "type": "timeseries" } ], - "title": "Flash Pool Drilldown", + "title": "Flash Pool", "type": "row" }, { @@ -3925,23 +3925,695 @@ "x": 0, "y": 57 }, - "id": 81, + "id": 810, "panels": [ { "datasource": "${DS_PROMETHEUS}", - "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 82 + }, + "id": 195, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, (aggr_space_performance_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggregatePerformanceTierFootprint\"}))", + "hide": false, + "interval": "", + "legendFormat": "{{cluster}} - {{aggr}}", + "refId": "A" + } + ], + "title": "Top $TopResources Aggregates by Performance Tier Footprint", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + 
}, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 82 + }, + "id": 197, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, (aggr_space_performance_tier_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggregatePerformanceTierFootprintPerc\"}))", + "hide": false, + "interval": "", + "legendFormat": "{{cluster}} - {{aggr}}", + "refId": "A" + } + ], + "title": "Top $TopResources Aggregates by Performance Tier Footprint %", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 94 + }, + "id": 199, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, aggr_space_capacity_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$TopAggregateCapacityTierFootprint\"})", + "hide": false, + "interval": "", + "legendFormat": "{{cluster}} - {{aggr}}", + "refId": "A" + } + ], + "title": "Top $TopResources Aggregates by Capacity Tier Footprint", + "transformations": [], + "type": "timeseries" + } + ], + "title": "FabricPool", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 106 + }, + "id": 81, + "panels": [ + { + "datasource": "${DS_PROMETHEUS}", + "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "Space Used", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "opacity", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 107 + }, + "id": 83, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSizeUsed\"})", + "hide": false, + "interval": "", + "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "refId": "D" + } + ], + "title": "Top $TopResources Volumes by Space Used by Aggregate", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "Space Used %", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 107 + }, + "id": 84, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_size_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSizeUsedPercent\"})", + "hide": false, + "interval": "", + "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Space Used %", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "Snapshot Space Used", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 119 + }, + "id": 87, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_snapshots_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSnapshotSizeUsed\"})", + "hide": false, + "interval": "", + "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Snapshot Space Used", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.\n\nNote that in some scenarios, it is possible to exceed 100% of the space allocated.\n", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "Snapshot Space Used %", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 119 + }, + "id": 85, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_snapshot_reserve_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSnapshotSizeUsedPercent\"})", + "hide": false, + "interval": "", + "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "refId": "C" + } + ], + "title": "Top $TopResources Volumes by Snapshot Space Used %", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisLabel": "Space Used", + "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, - "gradientMode": "opacity", + "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, @@ -3971,6 +4643,10 @@ { "color": "green", "value": null + }, + { + "color": "red", + "value": 80 } ] }, @@ -3982,9 +4658,9 @@ "h": 12, "w": 12, "x": 0, - "y": 82 + "y": 131 }, - "id": 83, + "id": 95, "options": { "legend": { "calcs": [ @@ -4006,27 +4682,27 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", volume=~\"$TopVolumeSizeUsed\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSizeUsed\"})", + "expr": "topk($TopResources, (volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumePerformanceTierFootprint\"}))", "hide": false, "interval": "", - "legendFormat": "{{aggr}} - {{volume}} - {{style}}", - "refId": "D" + "legendFormat": "{{volume}} ", + "refId": "A" } ], - "title": "Top $TopResources Volumes by Space Used by Aggregate", + "title": "Top $TopResources Volumes by Performance Tier Footprint", "transformations": [], "type": "timeseries" }, { "datasource": "${DS_PROMETHEUS}", - "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "description": "", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisLabel": "Space Used %", + "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", @@ -4077,9 +4753,9 @@ "h": 12, "w": 12, "x": 12, - "y": 82 + "y": 131 }, - "id": 84, + "id": 97, "options": { "legend": { "calcs": [ @@ -4101,27 +4777,27 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_size_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumeSizeUsedPercent\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSizeUsedPercent\"})", + "expr": "topk($TopResources, (volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumePerformanceTierFootprintPerc\"}))", "hide": false, "interval": "", - "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "legendFormat": "{{volume}}", "refId": "A" } ], - "title": "Top $TopResources Volumes by Space Used %", + "title": "Top $TopResources Volumes by Performance Tier Footprint %", "transformations": [], "type": "timeseries" }, { "datasource": "${DS_PROMETHEUS}", - "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.", + "description": "", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisLabel": "Snapshot Space Used", + "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", @@ -4171,9 +4847,9 @@ "h": 12, "w": 12, "x": 0, - "y": 94 + "y": 143 }, - "id": 87, + "id": 99, "options": { "legend": { "calcs": [ @@ -4195,27 +4871,27 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_snapshots_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumeSnapshotSizeUsed\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSnapshotSizeUsed\"})", + "expr": "topk($TopResources, volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumeCapacityTierFootprint\"})", "hide": false, "interval": "", - "legendFormat": "{{aggr}} - {{volume}} - {{style}}", + "legendFormat": "{{volume}} ", "refId": "A" } ], - "title": "Top $TopResources Volumes by Snapshot Space Used", + "title": "Top $TopResources Volumes by Capacity Tier Footprint", "transformations": [], "type": "timeseries" }, { "datasource": "${DS_PROMETHEUS}", - "description": "Flexgroup by-aggregate filtering does not display the per-aggregate breakdown, instead the sum of all flexgroup aggregates is displayed. 
This is how ONTAP reports the data, even when an aggregate is selected in the dropdown.\n\nNote that in some scenarios, it is possible to exceed 100% of the space allocated.\n", + "description": "", "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { - "axisLabel": "Snapshot Space Used %", + "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", @@ -4266,9 +4942,9 @@ "h": 12, "w": 12, "x": 12, - "y": 94 + "y": 143 }, - "id": 85, + "id": 101, "options": { "legend": { "calcs": [ @@ -4290,14 +4966,14 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_snapshot_reserve_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumeSnapshotSizeUsedPercent\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeSnapshotSizeUsedPercent\"})", + "expr": "topk($TopResources, volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",volume=~\"$TopVolumeCapacityTierFootprintPerc\"})", "hide": false, "interval": "", - "legendFormat": "{{aggr}} - {{volume}} - {{style}}", - "refId": "C" + "legendFormat": "{{volume}}", + "refId": "A" } ], - "title": "Top $TopResources Volumes by Snapshot Space Used %", + "title": "Top $TopResources Volumes by Capacity Tier Footprint %", "transformations": [], "type": "timeseries" } @@ -4312,7 +4988,7 @@ "h": 1, "w": 24, "x": 0, - "y": 58 + "y": 155 }, "id": 28, "panels": [ @@ -4374,7 +5050,7 @@ "h": 9, "w": 8, "x": 0, - "y": 6 + "y": 156 }, "id": 88, "options": { @@ -4395,7 +5071,7 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_read_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", volume=~\"$TopVolumeReadLatency\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeReadLatency\"})", + "expr": "topk($TopResources, volume_read_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeReadLatency\"})", "interval": "", "legendFormat": "{{aggr}} - {{volume}} - {{style}}", "refId": "A" @@ -4463,7 +5139,7 @@ "h": 9, "w": 8, "x": 8, - "y": 6 + "y": 156 }, "id": 89, "options": { @@ -4484,7 +5160,7 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_read_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", volume=~\"$TopVolumeReadThroughput\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeReadThroughput\"})", + "expr": "topk($TopResources, volume_read_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeReadThroughput\"})", "interval": "", "intervalFactor": 1, "legendFormat": "{{aggr}} - {{volume}} - {{style}}", @@ -4553,7 +5229,7 @@ "h": 9, "w": 8, "x": 16, - "y": 6 + "y": 156 }, "id": 90, "options": { @@ -4574,7 +5250,7 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_read_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", volume=~\"$TopVolumeReadIOPS\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeReadIOPS\"})", + "expr": "topk($TopResources, 
volume_read_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeReadIOPS\"})", "interval": "", "legendFormat": "{{aggr}} - {{volume}} - {{style}}", "refId": "A" @@ -4642,7 +5318,7 @@ "h": 9, "w": 8, "x": 0, - "y": 15 + "y": 165 }, "id": 91, "options": { @@ -4663,7 +5339,7 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_write_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", volume=~\"$TopVolumeWriteLatency\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeWriteLatency\"})", + "expr": "topk($TopResources, volume_write_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeWriteLatency\"})", "interval": "", "legendFormat": "{{aggr}} - {{volume}} - {{style}}", "refId": "A" @@ -4731,7 +5407,7 @@ "h": 9, "w": 8, "x": 8, - "y": 15 + "y": 165 }, "id": 92, "options": { @@ -4752,7 +5428,7 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_write_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", volume=~\"$TopVolumeWriteThroughput\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeWriteThroughput\"})", + "expr": "topk($TopResources, volume_write_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeWriteThroughput\"})", "interval": "", "legendFormat": "{{aggr}} - {{volume}} - {{style}}", "refId": "A" @@ -4820,7 +5496,7 @@ "h": 9, "w": 8, "x": 16, - "y": 15 + "y": 165 }, "id": 93, "options": { @@ -4841,7 +5517,7 @@ "targets": [ { "exemplar": false, - "expr": "topk($TopResources, volume_write_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", volume=~\"$TopVolumeWriteIOPS\"} * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeWriteIOPS\"})", + "expr": "topk($TopResources, volume_write_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\",volume=~\"$TopVolumeWriteIOPS\"})", "interval": "", "legendFormat": "{{aggr}} - {{volume}} - {{style}}", "refId": "A" @@ -4852,7 +5528,7 @@ "type": "timeseries" } ], - "title": "Busy Volumes Drilldown", + "title": "Busy Volumes", "type": "row" } ], @@ -5108,7 +5784,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))", + "definition": "query_result(topk($TopResources, avg_over_time(volume_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))", "description": null, "error": null, "hide": 2, @@ -5118,7 +5794,7 @@ "name": "TopVolumeSizeUsed", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))", + "query": "query_result(topk($TopResources, 
avg_over_time(volume_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5131,7 +5807,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_size_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "definition": "query_result(topk($TopResources, avg_over_time(volume_size_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "description": null, "error": null, "hide": 2, @@ -5141,7 +5817,7 @@ "name": "TopVolumeSizeUsedPercent", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_size_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "query": "query_result(topk($TopResources, avg_over_time(volume_size_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5154,7 +5830,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_snapshots_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "definition": "query_result(topk($TopResources, avg_over_time(volume_snapshots_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "description": null, "error": null, "hide": 2, @@ -5164,7 +5840,7 @@ "name": "TopVolumeSnapshotSizeUsed", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_snapshots_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "query": "query_result(topk($TopResources, avg_over_time(volume_snapshots_size_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5177,7 +5853,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_snapshot_reserve_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "definition": "query_result(topk($TopResources, avg_over_time(volume_snapshot_reserve_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "description": null, "error": null, "hide": 2, @@ -5187,7 +5863,7 @@ "name": "TopVolumeSnapshotSizeUsedPercent", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_snapshot_reserve_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) 
volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "query": "query_result(topk($TopResources, avg_over_time(volume_snapshot_reserve_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5200,7 +5876,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_read_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "definition": "query_result(topk($TopResources, avg_over_time(volume_read_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "description": null, "error": null, "hide": 2, @@ -5210,7 +5886,7 @@ "name": "TopVolumeReadLatency", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_read_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "query": "query_result(topk($TopResources, avg_over_time(volume_read_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5223,7 +5899,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_read_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "definition": "query_result(topk($TopResources, avg_over_time(volume_read_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "description": null, "error": null, "hide": 2, @@ -5233,7 +5909,7 @@ "name": "TopVolumeReadThroughput", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_read_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "query": "query_result(topk($TopResources, avg_over_time(volume_read_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5246,7 +5922,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_read_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "definition": "query_result(topk($TopResources, avg_over_time(volume_read_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "description": null, "error": null, "hide": 2, @@ -5256,7 +5932,7 @@ "name": "TopVolumeReadIOPS", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_read_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) 
group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "query": "query_result(topk($TopResources, avg_over_time(volume_read_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5269,7 +5945,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_write_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "definition": "query_result(topk($TopResources, avg_over_time(volume_write_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "description": null, "error": null, "hide": 2, @@ -5279,7 +5955,7 @@ "name": "TopVolumeWriteLatency", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_write_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "query": "query_result(topk($TopResources, avg_over_time(volume_write_latency{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5292,7 +5968,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_write_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "definition": "query_result(topk($TopResources, avg_over_time(volume_write_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "description": null, "error": null, "hide": 2, @@ -5302,7 +5978,7 @@ "name": "TopVolumeWriteThroughput", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_write_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "query": "query_result(topk($TopResources, avg_over_time(volume_write_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5315,7 +5991,7 @@ "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", - "definition": "query_result(topk($TopResources, avg_over_time(volume_write_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "definition": "query_result(topk($TopResources, avg_over_time(volume_write_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", "description": null, "error": null, "hide": 2, @@ -5325,7 +6001,99 @@ "name": "TopVolumeWriteIOPS", "options": [], "query": { - "query": "query_result(topk($TopResources, avg_over_time(volume_write_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}]) * on (cluster, svm, 
volume) group_left(aggr) volume_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}))\n", + "query": "query_result(topk($TopResources, avg_over_time(volume_write_ops{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\".*$Aggregate.*\"}[${__range}])))\n", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumeCapacityTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumePerformanceTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumeCapacityTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumePerformanceTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}[${__range}])))", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -5333,6 +6101,75 @@ "skipUrlSync": false, "sort": 0, "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": 
"query_result(topk($TopResources,avg_over_time(aggr_space_capacity_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$Aggregate\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopAggregateCapacityTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(aggr_space_capacity_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$Aggregate\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*aggr=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(aggr_space_performance_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$Aggregate\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopAggregatePerformanceTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(aggr_space_performance_tier_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$Aggregate\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*aggr=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(aggr_space_performance_tier_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$Aggregate\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopAggregatePerformanceTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(aggr_space_performance_tier_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", aggr=~\"$Aggregate\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*aggr=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" } ] }, diff --git a/grafana/dashboards/cmode/cluster.json b/grafana/dashboards/cmode/cluster.json index 8ad681865..fad7836bb 100644 --- a/grafana/dashboards/cmode/cluster.json +++ b/grafana/dashboards/cmode/cluster.json @@ -3967,7 +3967,7 @@ "type": "timeseries" } ], - "title": "SVM Performance Drilldown", + "title": "SVM Performance", "type": "row" } ], diff --git a/grafana/dashboards/cmode/lun.json b/grafana/dashboards/cmode/lun.json index 896335bc6..661f08513 100644 --- a/grafana/dashboards/cmode/lun.json +++ b/grafana/dashboards/cmode/lun.json @@ -1415,7 +1415,7 @@ "type": "table" } ], - "title": "LUN Table Drilldown", + "title": "LUN Table", "type": "row" }, { @@ -1978,7 +1978,7 @@ "type": "timeseries" } ], - "title": "Top LUN Performance Drilldown", + "title": "Top LUN Performance", "type": "row" }, { @@ -2299,7 +2299,7 @@ "type": "timeseries" } ], - "title": "Top LUN Performance Efficiency Drilldown", + "title": "Top LUN Performance Efficiency", "type": "row" }, { @@ -3891,7 +3891,7 @@ "type": "timeseries" } ], - "title": "Top Volume and LUN Capacity Drilldown", + "title": "Top Volume and LUN Capacity", "type": "row" }, { @@ -4526,7 +4526,7 @@ "type": "timeseries" } ], - "title": "Per LUN Drilldown (Must Select Cluster/SVM/Volume/LUN)", + "title": "Per LUN 
(Must Select Cluster/SVM/Volume/LUN)", "type": "row" } ], diff --git a/grafana/dashboards/cmode/mcc_cluster.json b/grafana/dashboards/cmode/mcc_cluster.json index 5e21b3790..26a73ed5f 100644 --- a/grafana/dashboards/cmode/mcc_cluster.json +++ b/grafana/dashboards/cmode/mcc_cluster.json @@ -1609,7 +1609,7 @@ } ], "repeat": null, - "title": "MetroCluster FCVI Drilldown", + "title": "MetroCluster FCVI", "type": "row" }, { @@ -1997,7 +1997,7 @@ } ], "repeat": null, - "title": "MetroCluster Iwarp Drilldown", + "title": "MetroCluster Iwarp", "type": "row" }, { @@ -2622,7 +2622,7 @@ } ], "repeat": null, - "title": "MetroCluster Disk Drilldown", + "title": "MetroCluster Disk", "type": "row" }, { @@ -2824,7 +2824,7 @@ } ], "repeat": null, - "title": "Disk and Tape Adapter Drilldown", + "title": "Disk and Tape Adapter", "type": "row" }, { @@ -3392,7 +3392,7 @@ } ], "repeat": null, - "title": "MetroCluster FibreBridge/Array Drilldown", + "title": "MetroCluster FibreBridge/Array", "type": "row" } ], diff --git a/grafana/dashboards/cmode/metadata.json b/grafana/dashboards/cmode/metadata.json index 8e8b0d7d2..ec88df431 100644 --- a/grafana/dashboards/cmode/metadata.json +++ b/grafana/dashboards/cmode/metadata.json @@ -3795,7 +3795,7 @@ "type": "timeseries" } ], - "title": "Collectors Drilldown", + "title": "Collectors", "type": "row" }, { @@ -4030,7 +4030,7 @@ "type": "timeseries" } ], - "title": "Prometheus Drilldown", + "title": "Prometheus", "type": "row" } ], diff --git a/grafana/dashboards/cmode/namespace.json b/grafana/dashboards/cmode/namespace.json index 46ce473bb..b072d6501 100644 --- a/grafana/dashboards/cmode/namespace.json +++ b/grafana/dashboards/cmode/namespace.json @@ -88,7 +88,7 @@ }, "id": 28, "panels": [], - "title": "Top NVMe Namespaces Performance Drilldown", + "title": "Top NVMe Namespaces Performance", "type": "row" }, { @@ -899,7 +899,7 @@ "type": "table" } ], - "title": "NVMe Namespaces Table Drilldown", + "title": "NVMe Namespaces Table", "type": "row" } ], diff --git a/grafana/dashboards/cmode/network.json b/grafana/dashboards/cmode/network.json index d7846b639..77097072a 100644 --- a/grafana/dashboards/cmode/network.json +++ b/grafana/dashboards/cmode/network.json @@ -1399,7 +1399,7 @@ "type": "timeseries" } ], - "title": "Ethernet Drilldown", + "title": "Ethernet", "type": "row" }, { @@ -2457,7 +2457,7 @@ "type": "timeseries" } ], - "title": "FibreChannel Drilldown", + "title": "FibreChannel", "type": "row" }, { @@ -3018,7 +3018,7 @@ "type": "timeseries" } ], - "title": "NVMe/FC Drilldown", + "title": "NVMe/FC", "type": "row" }, { diff --git a/grafana/dashboards/cmode/nfs4storePool.json b/grafana/dashboards/cmode/nfs4storePool.json index 97ddcc551..65eed5f23 100644 --- a/grafana/dashboards/cmode/nfs4storePool.json +++ b/grafana/dashboards/cmode/nfs4storePool.json @@ -2000,7 +2000,7 @@ "type": "timeseries" } ], - "title": "Lock Drilldown", + "title": "Lock", "type": "row" } ], diff --git a/grafana/dashboards/cmode/node.json b/grafana/dashboards/cmode/node.json index b551b8aae..396babd25 100644 --- a/grafana/dashboards/cmode/node.json +++ b/grafana/dashboards/cmode/node.json @@ -1294,7 +1294,7 @@ "type": "timeseries" } ], - "title": "CPU Layer Drilldown", + "title": "CPU Layer", "type": "row" }, { @@ -1673,7 +1673,7 @@ "type": "timeseries" } ], - "title": "Network Layer Drilldown", + "title": "Network Layer", "type": "row" }, { @@ -2366,7 +2366,7 @@ "type": "timeseries" } ], - "title": "Backend Drilldown", + "title": "Backend", "type": "row" }, { @@ -3112,7 +3112,7 @@ "type": 
"timeseries" } ], - "title": "NFSv3 Frontend Drilldown", + "title": "NFSv3 Frontend", "type": "row" }, { @@ -3456,7 +3456,7 @@ "type": "timeseries" } ], - "title": "CIFS Frontend Drilldown", + "title": "CIFS Frontend", "type": "row" }, { @@ -3870,7 +3870,7 @@ "type": "timeseries" } ], - "title": "FCP Frontend Drilldown", + "title": "FCP Frontend", "type": "row" }, { @@ -4286,7 +4286,7 @@ "type": "timeseries" } ], - "title": "NVMe/FC Frontend Drilldown", + "title": "NVMe/FC Frontend", "type": "row" }, { @@ -4700,7 +4700,7 @@ "type": "timeseries" } ], - "title": "iSCSI Frontend Drilldown", + "title": "iSCSI Frontend", "type": "row" } ], diff --git a/grafana/dashboards/cmode/power.json b/grafana/dashboards/cmode/power.json index 3e63d6154..c53fb4c30 100644 --- a/grafana/dashboards/cmode/power.json +++ b/grafana/dashboards/cmode/power.json @@ -71,7 +71,7 @@ "gnetId": null, "graphTooltip": 1, "id": null, - "iteration": 1693298975439, + "iteration": 1694717870555, "links": [ { "asDropdown": true, @@ -106,14 +106,14 @@ { "datasource": "${DS_PROMETHEUS}", "gridPos": { - "h": 2, + "h": 4, "w": 24, "x": 0, "y": 1 }, "id": 101, "options": { - "content": "In an ONTAP system with **embedded** disk shelves, power and temperature data might not be visible for individual shelves. This is because these metrics are computed at the cluster level, not at the level of individual shelves.\n\nEmbedded shelf power is shared among the common nodes.", + "content": "This dashboard requires ONTAP 9.6+ and REST private CLI.\n\nSee power algorithm for more details.\n\nPower and temperature metrics may not be visible for clusters with **embedded** disk shelves. Those metrics are computed at the cluster level, not at the level of individual shelves. Embedded shelf power is shared among the common nodes.", "mode": "markdown" }, "pluginVersion": "8.1.8", @@ -126,7 +126,7 @@ "h": 1, "w": 24, "x": 0, - "y": 3 + "y": 5 }, "id": 15, "panels": [], @@ -160,7 +160,7 @@ "h": 10, "w": 9, "x": 0, - "y": 4 + "y": 6 }, "id": 21, "links": [], @@ -226,7 +226,7 @@ "h": 5, "w": 7, "x": 9, - "y": 4 + "y": 6 }, "id": 71, "options": { @@ -301,7 +301,7 @@ "h": 5, "w": 4, "x": 16, - "y": 4 + "y": 6 }, "id": 68, "options": { @@ -360,7 +360,7 @@ "h": 5, "w": 4, "x": 20, - "y": 4 + "y": 6 }, "id": 70, "options": { @@ -419,7 +419,7 @@ "h": 5, "w": 7, "x": 9, - "y": 9 + "y": 11 }, "id": 96, "options": { @@ -494,7 +494,7 @@ "h": 5, "w": 4, "x": 16, - "y": 9 + "y": 11 }, "id": 64, "options": { @@ -554,7 +554,7 @@ "h": 5, "w": 4, "x": 20, - "y": 9 + "y": 11 }, "id": 97, "options": { @@ -680,7 +680,7 @@ "h": 8, "w": 24, "x": 0, - "y": 14 + "y": 16 }, "id": 82, "options": { @@ -787,7 +787,7 @@ "h": 8, "w": 13, "x": 0, - "y": 22 + "y": 24 }, "id": 73, "options": { @@ -878,7 +878,7 @@ "h": 8, "w": 11, "x": 13, - "y": 22 + "y": 24 }, "id": 74, "options": { @@ -968,7 +968,7 @@ "h": 8, "w": 13, "x": 0, - "y": 30 + "y": 32 }, "id": 79, "options": { @@ -1059,7 +1059,7 @@ "h": 8, "w": 11, "x": 13, - "y": 30 + "y": 32 }, "id": 88, "options": { @@ -1150,7 +1150,7 @@ "h": 8, "w": 13, "x": 0, - "y": 38 + "y": 40 }, "id": 87, "options": { @@ -1241,7 +1241,7 @@ "h": 8, "w": 11, "x": 13, - "y": 38 + "y": 40 }, "id": 89, "options": { @@ -1332,7 +1332,7 @@ "h": 8, "w": 13, "x": 0, - "y": 46 + "y": 48 }, "id": 94, "options": { @@ -1353,7 +1353,7 @@ "targets": [ { "exemplar": false, - "expr": "sum(aggr_disk_user_reads{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggrPower\"} and on(aggr,cluster,node) 
aggr_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggrPower\"}) by (aggr,cluster,node)", + "expr": "sum(aggr_disk_user_reads{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggrPower\"}) by (aggr,cluster,node)", "hide": false, "interval": "", "legendFormat": "Reads - {{aggr}} ({{cluster}})", @@ -1361,7 +1361,7 @@ }, { "exemplar": false, - "expr": "sum(aggr_disk_user_writes{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggrPower\"} and on(aggr,cluster,node) aggr_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggrPower\"}) by (aggr,cluster,node)", + "expr": "sum(aggr_disk_user_writes{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",aggr=~\"$TopAggrPower\"}) by (aggr,cluster,node)", "hide": false, "interval": "", "legendFormat": "Writes - {{aggr}} ({{cluster}})", @@ -1380,7 +1380,7 @@ "h": 1, "w": 24, "x": 0, - "y": 54 + "y": 56 }, "id": 93, "panels": [ @@ -1635,7 +1635,7 @@ "h": 8, "w": 24, "x": 0, - "y": 52 + "y": 57 }, "id": 91, "interval": "1m", @@ -1664,37 +1664,7 @@ }, { "exemplar": false, - "expr": "aggr_space_total{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "D" - }, - { - "exemplar": false, - "expr": "aggr_space_used{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "E" - }, - { - "exemplar": false, - "expr": "aggr_space_used_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "F" - }, - { - "exemplar": false, - "expr": "sum(aggr_disk_total_transfers{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"} and on(aggr,cluster,node) aggr_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}) by (aggr,cluster,node)", + "expr": "sum(aggr_disk_total_transfers{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}) by (aggr,cluster,node)", "format": "table", "hide": false, "instant": true, @@ -1704,23 +1674,13 @@ }, { "exemplar": false, - "expr": "sum(aggr_disk_total_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"} and on(aggr,cluster,node) aggr_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}) by (aggr,cluster,node)", + "expr": "sum(aggr_disk_total_data{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}) by (aggr,cluster,node)", "format": "table", "hide": false, "instant": true, "interval": "", "legendFormat": "", "refId": "G" - }, - { - "exemplar": false, - "expr": "aggr_new_status{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" } ], "timeFrom": null, @@ -1768,7 +1728,7 @@ "h": 1, "w": 24, "x": 0, - "y": 55 + "y": 57 }, "id": 84, "panels": [ @@ -1893,7 +1853,7 @@ "h": 1, "w": 24, "x": 0, - "y": 56 + "y": 58 }, "id": 76, "panels": [ @@ -2430,7 +2390,7 @@ "h": 1, "w": 24, "x": 0, - "y": 57 + "y": 59 }, "id": 78, "panels": [ @@ -2849,7 +2809,7 @@ "h": 7, "w": 24, "x": 0, - "y": 58 + "y": 60 }, "id": 81, "interval": "1m", @@ -2861,6 +2821,7 @@ "pluginVersion": "8.1.8", "targets": [ { + "exemplar": false, "expr": "shelf_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}", "format": "table", "instant": true, @@ -2890,7 +2851,7 @@ }, { "exemplar": false, - "expr": "shelf_power{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"}", + "expr": 
"shelf_power{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\"} * on(datacenter,cluster,shelf) shelf_labels{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",isEmbedded=\"No\"}", "format": "table", "hide": false, "instant": true, @@ -3399,5 +3360,5 @@ "timezone": "", "title": "ONTAP: Power", "uid": "", - "version": 10 + "version": 12 } diff --git a/grafana/dashboards/cmode/s3ObjectStorage.json b/grafana/dashboards/cmode/s3ObjectStorage.json index a36fe1dc6..edde1e1a9 100644 --- a/grafana/dashboards/cmode/s3ObjectStorage.json +++ b/grafana/dashboards/cmode/s3ObjectStorage.json @@ -112,7 +112,7 @@ "y": 3 }, "id": 16, - "title": "Bucket Drilldown", + "title": "Bucket", "type": "row" }, { diff --git a/grafana/dashboards/cmode/svm.json b/grafana/dashboards/cmode/svm.json index ad1e67e76..361fee0dc 100644 --- a/grafana/dashboards/cmode/svm.json +++ b/grafana/dashboards/cmode/svm.json @@ -1506,7 +1506,7 @@ "type": "timeseries" } ], - "title": "Volumes Performance Drilldown", + "title": "Volumes Performance", "type": "row" }, { @@ -2203,7 +2203,7 @@ "type": "timeseries" } ], - "title": "LIF Drilldown", + "title": "LIF", "type": "row" }, { @@ -3265,7 +3265,7 @@ "type": "timeseries" } ], - "title": "CIFS Drilldown", + "title": "CIFS", "type": "row" }, { @@ -4326,7 +4326,7 @@ "type": "timeseries" } ], - "title": "FCP Drilldown", + "title": "FCP", "type": "row" }, { @@ -5273,7 +5273,7 @@ "type": "timeseries" } ], - "title": "iSCSI Drilldown", + "title": "iSCSI", "type": "row" }, { @@ -6556,7 +6556,7 @@ "type": "timeseries" } ], - "title": "NFSv3 Drilldown", + "title": "NFSv3", "type": "row" }, { @@ -7856,7 +7856,7 @@ "type": "timeseries" } ], - "title": "NFSv4 Drilldown", + "title": "NFSv4", "type": "row" }, { @@ -9164,7 +9164,7 @@ "type": "timeseries" } ], - "title": "NFSv4.1 Drilldown", + "title": "NFSv4.1", "type": "row" }, { @@ -10226,7 +10226,7 @@ "type": "timeseries" } ], - "title": "NVMe/FC Drilldown", + "title": "NVMe/FC", "type": "row" }, { @@ -10480,7 +10480,7 @@ "type": "timeseries" } ], - "title": "Copy Offload Drilldown", + "title": "Copy Offload", "type": "row" }, { @@ -11417,7 +11417,7 @@ "type": "timeseries" } ], - "title": "QoS Policy Group Drilldown", + "title": "QoS Policy Group", "type": "row" }, { @@ -12242,7 +12242,7 @@ "type": "timeseries" } ], - "title": "QoS Policy Group Latency from Resource Drilldown", + "title": "QoS Policy Group Latency from Resource", "type": "row" }, { @@ -12886,7 +12886,7 @@ "type": "timeseries" } ], - "title": "Volume Capacity Drilldown", + "title": "Volume Capacity", "type": "row" }, { diff --git a/grafana/dashboards/cmode/volume.json b/grafana/dashboards/cmode/volume.json index dd12e165c..9e31e63a5 100644 --- a/grafana/dashboards/cmode/volume.json +++ b/grafana/dashboards/cmode/volume.json @@ -1675,7 +1675,7 @@ "type": "table" } ], - "title": "Volume Table Drilldown", + "title": "Volume Table", "type": "row" }, { @@ -2602,7 +2602,7 @@ "type": "timeseries" } ], - "title": "Volume WAFL Layer Drilldown", + "title": "Volume WAFL Layer", "type": "row" }, { @@ -3356,7 +3356,7 @@ "type": "timeseries" } ], - "title": "Top Volume End-to-End QoS Drilldown", + "title": "Top Volume End-to-End QoS", "type": "row" }, { @@ -4383,7 +4383,7 @@ "type": "timeseries" } ], - "title": "Top Volume QoS Resource Latency Drilldown", + "title": "Top Volume QoS Resource Latency", "type": "row" }, { @@ -5320,7 +5320,7 @@ "type": "table" } ], - "title": "Top Volume FabricPool Drilldown", + "title": "Top Volume Object Storage", "type": "row" }, { @@ -5332,6 +5332,399 @@ "x": 0, 
"y": 19 }, + "id": 99, + "panels": [ + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 119, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, (volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",svm=~\"$SVM\", volume=~\"$TopVolumePerformanceTierFootprint\"}))", + "hide": false, + "interval": "", + "legendFormat": "{{svm}} - {{volume}} ", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Performance Tier Footprint", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 120, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, (volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",svm=~\"$SVM\", volume=~\"$TopVolumePerformanceTierFootprintPerc\"}))", + "hide": false, + "interval": "", + "legendFormat": "{{svm}} - {{volume}} ", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Performance Tier Footprint %", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 32 + }, + "id": 121, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",svm=~\"$SVM\", volume=~\"$TopVolumeCapacityTierFootprint\"})", + "hide": false, + "interval": "", + "legendFormat": "{{svm}} - {{volume}} ", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Capacity Tier Footprint", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 122, + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "topk($TopResources, volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\",svm=~\"$SVM\", volume=~\"$TopVolumeCapacityTierFootprintPerc\"})", + "hide": false, + "interval": "", + "legendFormat": "{{svm}} - {{volume}} ", + "refId": "A" + } + ], + "title": "Top $TopResources Volumes by Capacity Tier Footprint %", + "transformations": [], + "type": "timeseries" + } + ], + "title": "Top Volume FabricPool", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 20 + }, "id": 98, "panels": [ { @@ -5391,7 +5784,7 @@ "h": 8, "w": 12, "x": 0, - "y": 20 + "y": 21 }, "id": 100, "options": { @@ -5478,7 +5871,7 @@ "h": 8, "w": 12, "x": 12, - "y": 20 + "y": 21 }, "id": 102, "options": { @@ -5566,7 +5959,7 @@ "h": 
8, "w": 24, "x": 0, - "y": 28 + "y": 29 }, "id": 101, "options": { @@ -5596,7 +5989,7 @@ "type": "timeseries" } ], - "title": "Top Inode Drilldown", + "title": "Top Inode", "type": "row" }, { @@ -5606,7 +5999,7 @@ "h": 1, "w": 24, "x": 0, - "y": 20 + "y": 21 }, "id": 105, "panels": [ @@ -5616,7 +6009,7 @@ "h": 2, "w": 24, "x": 0, - "y": 21 + "y": 22 }, "id": 110, "options": { @@ -5687,7 +6080,7 @@ "h": 8, "w": 8, "x": 0, - "y": 23 + "y": 24 }, "id": 108, "options": { @@ -5779,7 +6172,7 @@ "h": 8, "w": 8, "x": 8, - "y": 23 + "y": 24 }, "id": 106, "options": { @@ -5871,7 +6264,7 @@ "h": 8, "w": 8, "x": 16, - "y": 23 + "y": 24 }, "id": 107, "options": { @@ -7077,6 +7470,98 @@ "skipUrlSync": false, "sort": 0, "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumeCapacityTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumePerformanceTierFootprint", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, + "multi": true, + "name": "TopVolumeCapacityTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_capacity_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "description": null, + "error": null, + "hide": 2, + "includeAll": true, + "label": null, 
+ "multi": true, + "name": "TopVolumePerformanceTierFootprintPerc", + "options": [], + "query": { + "query": "query_result(topk($TopResources,avg_over_time(volume_performance_tier_footprint_percent{datacenter=~\"$Datacenter\",cluster=~\"$Cluster\", svm=~\"$SVM\",volume=~\"$Volume\"}[${__range}])))", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": ".*volume=\\\"(.*?)\\\".*", + "skipUrlSync": false, + "sort": 0, + "type": "query" } ] }, diff --git a/grafana/dashboards/influxdb/metadata.json b/grafana/dashboards/influxdb/metadata.json index 46af7f01b..6c5b0b728 100644 --- a/grafana/dashboards/influxdb/metadata.json +++ b/grafana/dashboards/influxdb/metadata.json @@ -3842,7 +3842,7 @@ }, "id": 60, "panels": [], - "title": "Collectors Drilldown", + "title": "Collectors", "type": "row" }, { diff --git a/grafana/dashboards/influxdb/network.json b/grafana/dashboards/influxdb/network.json index a626ca4b4..748d7d02f 100644 --- a/grafana/dashboards/influxdb/network.json +++ b/grafana/dashboards/influxdb/network.json @@ -79,7 +79,7 @@ }, "id": 105, "panels": [], - "title": "Ethernet Drilldown", + "title": "Ethernet", "type": "row" }, { @@ -3956,7 +3956,7 @@ } } ], - "title": "FibreChannel Drilldown", + "title": "FibreChannel", "type": "row" } ], diff --git a/grafana/dashboards/influxdb/snapmirror.json b/grafana/dashboards/influxdb/snapmirror.json index 41309af29..354b7f939 100644 --- a/grafana/dashboards/influxdb/snapmirror.json +++ b/grafana/dashboards/influxdb/snapmirror.json @@ -1940,7 +1940,7 @@ } } ], - "title": "Destination Node DrillDown", + "title": "Destination Node", "type": "row" }, { @@ -3190,7 +3190,7 @@ } } ], - "title": "Destination SVM Drilldown", + "title": "Destination SVM", "type": "row" } ], diff --git a/grafana/dashboards/influxdb/svm.json b/grafana/dashboards/influxdb/svm.json index 92f323ae0..ee0a39da2 100644 --- a/grafana/dashboards/influxdb/svm.json +++ b/grafana/dashboards/influxdb/svm.json @@ -73,7 +73,7 @@ }, "id": 37, "panels": [], - "title": "NFS${NFSv} Frontend Drilldown", + "title": "NFS${NFSv} Frontend", "type": "row" }, { diff --git a/grafana/dashboards/storagegrid/fabricpool.json b/grafana/dashboards/storagegrid/fabricpool.json index f6593b90d..0ca7a46a0 100644 --- a/grafana/dashboards/storagegrid/fabricpool.json +++ b/grafana/dashboards/storagegrid/fabricpool.json @@ -14,7 +14,7 @@ "id": "grafana", "name": "Grafana", "type": "grafana", - "version": "8.1.2" + "version": "8.1.8" }, { "id": "prometheus", @@ -65,7 +65,7 @@ "gnetId": null, "graphTooltip": 1, "id": null, - "iteration": 1681738814239, + "iteration": 1694089643456, "links": [ { "asDropdown": true, @@ -155,7 +155,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.1.2", + "pluginVersion": "8.1.8", "targets": [ { "exemplar": false, @@ -232,7 +232,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.1.2", + "pluginVersion": "8.1.8", "targets": [ { "exemplar": false, @@ -309,7 +309,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.1.2", + "pluginVersion": "8.1.8", "targets": [ { "exemplar": false, @@ -373,7 +373,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.1.2", + "pluginVersion": "8.1.8", "targets": [ { "exemplar": false, @@ -431,7 +431,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.1.2", + "pluginVersion": "8.1.8", "targets": [ { "exemplar": false, @@ -491,7 +491,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.1.2", + "pluginVersion": "8.1.8", "targets": [ { "exemplar": false, @@ -850,7 +850,7 @@ "showHeader": true, 
"sortBy": [] }, - "pluginVersion": "8.1.2", + "pluginVersion": "8.1.8", "targets": [ { "exemplar": false, @@ -1552,11 +1552,11 @@ } ] }, - "pluginVersion": "8.1.2", + "pluginVersion": "8.1.8", "targets": [ { "exemplar": false, - "expr": "bucket_bytes{cluster=~\"$SGCluster\",bucket=~\"$Container\"}", + "expr": "bucket_bytes{cluster=~\"$SGCluster\",bucket=~\"$Bucket\"}", "format": "table", "instant": true, "interval": "", @@ -1565,7 +1565,7 @@ }, { "exemplar": false, - "expr": "bucket_objects{cluster=~\"$SGCluster\",bucket=~\"$Container\"}", + "expr": "bucket_objects{cluster=~\"$SGCluster\",bucket=~\"$Bucket\"}", "format": "table", "hide": false, "instant": true, @@ -1729,38 +1729,12 @@ "sort": 1, "type": "query" }, - { - "allValue": null, - "current": {}, - "datasource": "${DS_PROMETHEUS}", - "definition": "label_values(cloud_target_labels{cluster=~\"$Cluster\"}, container)", - "description": null, - "error": null, - "hide": 2, - "includeAll": true, - "label": null, - "multi": true, - "name": "Container", - "options": [], - "query": { - "query": "label_values(cloud_target_labels{cluster=~\"$Cluster\"}, container)", - "refId": "StandardVariableQuery" - }, - "refresh": 2, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tagsQuery": "", - "type": "query", - "useTags": false - }, { "allValue": null, "current": { - "selected": false, - "text": "6", - "value": "6" + "selected": true, + "text": "5", + "value": "5" }, "description": null, "error": null, @@ -1937,6 +1911,32 @@ "skipUrlSync": false, "sort": 0, "type": "query" + }, + { + "allValue": null, + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "label_values(cloud_target_labels{cluster=~\"$Cluster\"},name)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Bucket", + "multi": true, + "name": "Bucket", + "options": [], + "query": { + "query": "label_values(cloud_target_labels{cluster=~\"$Cluster\"},name)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false } ] }, @@ -1948,5 +1948,5 @@ "timezone": "", "title": "ONTAP: StorageGrid FabricPool", "uid": "", - "version": 1 + "version": 2 } diff --git a/grafana/dashboards/storagegrid/overview.json b/grafana/dashboards/storagegrid/overview.json index c7c849c76..e19104346 100644 --- a/grafana/dashboards/storagegrid/overview.json +++ b/grafana/dashboards/storagegrid/overview.json @@ -60,11 +60,12 @@ } ] }, + "description": "", "editable": true, "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1678383760521, + "iteration": 1693920052430, "links": [], "panels": [ { @@ -1093,7 +1094,7 @@ "h": 10, "w": 12, "x": 0, - "y": 22 + "y": 3 }, "id": 12, "options": { @@ -1159,8 +1160,10 @@ "mode": "off" } }, - "decimals": 2, + "decimals": 1, "mappings": [], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -1174,7 +1177,7 @@ } ] }, - "unit": "decbytes" + "unit": "percentunit" }, "overrides": [] }, @@ -1182,7 +1185,7 @@ "h": 10, "w": 12, "x": 12, - "y": 22 + "y": 3 }, "id": 14, "options": { @@ -2141,6 +2144,7 @@ "type": "row" } ], + "refresh": "", "schemaVersion": 30, "style": "dark", "tags": [ @@ -2523,5 +2527,5 @@ "timezone": "", "title": "StorageGrid: Overview", "uid": "", - "version": 3 + "version": 4 } diff --git a/harvest.cue b/harvest.cue index ae33e119d..3b8b21345 100644 --- a/harvest.cue +++ b/harvest.cue @@ -50,8 +50,8 @@ label: [string]: string } 
#CertificateScript: { - path: string - timeout?: string + path: string + timeout?: string } #CredentialsScript: { @@ -69,11 +69,13 @@ Pollers: [Name=_]: #Poller #Poller: { addr?: string auth_style?: "basic_auth" | "certificate_auth" + ca_cert?: string + certificate_script?: #CertificateScript client_timeout?: string collectors?: [...#CollectorDef] | [...string] + conf_path?: string credentials_file?: string credentials_script?: #CredentialsScript - certificate_script?: #CertificateScript datacenter?: string exporters: [...string] is_kfs?: bool diff --git a/integration/Jenkinsfile b/integration/Jenkinsfile index 2ff39f0a3..e7302d034 100644 --- a/integration/Jenkinsfile +++ b/integration/Jenkinsfile @@ -18,7 +18,7 @@ pipeline { environment { BUILD_ID="dontKillMe" JENKINS_NODE_COOKIE="dontKillMe" - GO_VERSION = "1.21.0" + GO_VERSION = "1.21.1" } stages { diff --git a/integration/go.mod b/integration/go.mod index a231c398c..21ce0ad43 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -7,21 +7,21 @@ replace github.com/netapp/harvest/v2 => ../ require ( github.com/carlmjohnson/requests v0.23.4 github.com/netapp/harvest/v2 v2.0.0-20230811164902-1b91e21d6950 - github.com/rs/zerolog v1.30.0 - github.com/tidwall/gjson v1.16.0 - golang.org/x/text v0.12.0 + github.com/rs/zerolog v1.31.0 + github.com/tidwall/gjson v1.17.0 + golang.org/x/text v0.13.0 ) require ( dario.cat/mergo v1.0.0 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect - github.com/shirou/gopsutil/v3 v3.23.7 // indirect + github.com/shirou/gopsutil/v3 v3.23.9 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -31,6 +31,6 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect golang.org/x/net v0.14.0 // indirect - golang.org/x/sys v0.11.0 // indirect + golang.org/x/sys v0.12.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/integration/go.sum b/integration/go.sum index 44ae3d0d1..deefdb9f6 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -7,8 +7,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= @@ -20,10 +21,8 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLf github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a h1:N9zuLhTvBSRt0gWSiJswwQ2HqDmtX/ZCDJURnKUt1Ik= github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= @@ -34,11 +33,11 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= -github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v3 v3.23.7 h1:C+fHO8hfIppoJ1WdsVm1RoI0RwXoNdfTK7yWXV0wVj4= -github.com/shirou/gopsutil/v3 v3.23.7/go.mod h1:c4gnmoRC0hQuaLqvxnx1//VXQ0Ms/X9UnJF8pddY5z4= +github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E= +github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -54,17 +53,15 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg= -github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= +github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod 
h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= @@ -73,17 +70,15 @@ golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/integration/test/metric_test.go b/integration/test/metric_test.go index eb1329012..631109314 100644 --- a/integration/test/metric_test.go +++ b/integration/test/metric_test.go @@ -25,7 +25,7 @@ var skipDuplicates = map[string]bool{ func TestPollerMetrics(t *testing.T) { utils.SkipIfMissing(t, utils.Regression) - err := conf.LoadHarvestConfig(installer.HarvestConfigFile) + _, err := conf.LoadHarvestConfig(installer.HarvestConfigFile) if err != nil { log.Fatal().Err(err).Msg("Unable to load harvest config") } diff --git a/integration/test/utils/utils.go b/integration/test/utils/utils.go index a87f21cac..e8d4c5170 100644 --- a/integration/test/utils/utils.go +++ b/integration/test/utils/utils.go @@ -248,7 +248,7 @@ func WriteToken(token string) { var err error filename := "harvest.yml" abs, _ := 
filepath.Abs(filename) - err = conf.LoadHarvestConfig(filename) + _, err = conf.LoadHarvestConfig(filename) PanicIfNotNil(err) tools := conf.Config.Tools if tools != nil { diff --git a/jenkins/artifacts/jenkinsfile b/jenkins/artifacts/jenkinsfile index 86430d77f..3a5edb068 100644 --- a/jenkins/artifacts/jenkinsfile +++ b/jenkins/artifacts/jenkinsfile @@ -37,7 +37,7 @@ pipeline { jfrogImagePrefix = "netappdownloads.jfrog.io/oss-docker-harvest-production/harvest" jfrogRepo = "netappdownloads.jfrog.io" COMMIT_ID = sh(returnStdout: true, script: 'git rev-parse HEAD') - GO_VERSION = "1.21.0" + GO_VERSION = "1.21.1" } stages { diff --git a/mkdocs.yml b/mkdocs.yml index 7458f0184..9c898f715 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -44,11 +44,12 @@ nav: - 'FAQ': 'help/faq.md' - 'Log Collection': 'help/log-collection.md' - Reference: - - 'Plugins': 'plugins.md' - 'Matrix': 'resources/matrix.md' - 'ONTAP Metrics': 'ontap-metrics.md' - - 'Templates And Metrics': 'resources/templates-and-metrics.md' + - 'Power Algorithm': 'resources/power-algorithm.md' + - 'Plugins': 'plugins.md' - 'REST Perf Metrics': 'resources/rest-perf-metrics.md' + - 'Templates And Metrics': 'resources/templates-and-metrics.md' - About: - 'License': 'license.md' - 'Release Notes': 'release-notes.md' diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go index 7549ed1e7..04ef0183d 100644 --- a/pkg/auth/auth.go +++ b/pkg/auth/auth.go @@ -310,8 +310,7 @@ func handCertificateAuth(c *Credentials, poller *conf.Poller, insecureTLS bool) keyPath := poller.SslKey if certPath == "" || keyPath == "" { - o := &options.Options{} - options.SetPathsAndHostname(o) + o := options.New() pathPrefix = path.Join(o.HomePath, "cert/", o.Hostname) } diff --git a/pkg/conf/conf.go b/pkg/conf/conf.go index ce16a2207..64c52cbdc 100644 --- a/pkg/conf/conf.go +++ b/pkg/conf/conf.go @@ -6,6 +6,7 @@ package conf import ( "dario.cat/mergo" + "errors" "fmt" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/tree/node" @@ -15,6 +16,7 @@ import ( "os" "path/filepath" "regexp" + "sort" "strconv" ) @@ -24,17 +26,19 @@ var configRead = false const ( DefaultAPIVersion = "1.3" DefaultTimeout = "30s" + DefaultConfPath = "conf" HarvestYML = "harvest.yml" BasicAuth = "basic_auth" CertificateAuth = "certificate_auth" + HomeEnvVar = "HARVEST_CONF" ) -// TestLoadHarvestConfig is used by testing code to reload a new config +// TestLoadHarvestConfig loads a new config - used by testing code func TestLoadHarvestConfig(configPath string) { configRead = false Config = HarvestConfig{} promPortRangeMapping = make(map[string]PortMap) - err := LoadHarvestConfig(configPath) + _, err := LoadHarvestConfig(configPath) if err != nil { log.Fatalf("Failed to load config at=[%s] err=%+v\n", configPath, err) } @@ -42,57 +46,121 @@ func TestLoadHarvestConfig(configPath string) { func ConfigPath(path string) string { // Harvest uses the following precedence order. Each item takes precedence over the - // item below it: - // 1. --config command line flag - // 2. HARVEST_CONFIG environment variable + // item below it. All paths are relative to `HARVEST_CONF` environment variable + // 1. `--config` command line flag + // 2. `HARVEST_CONFIG` environment variable // 3. 
no command line argument and no environment variable, use the default path (HarvestYML) if path != HarvestYML && path != "./"+HarvestYML { - return path + return Path(path) } fp := os.Getenv("HARVEST_CONFIG") - if fp == "" { - return path + if fp != "" { + path = fp } - return fp + return Path(path) } -func LoadHarvestConfig(configPath string) error { +func LoadHarvestConfig(configPath string) (string, error) { + var ( + contents []byte + duplicates []error + err error + ) + + configPath = ConfigPath(configPath) if configRead { - return nil + return configPath, nil } - configPath = ConfigPath(configPath) - contents, err := os.ReadFile(configPath) + contents, err = os.ReadFile(configPath) if err != nil { - fmt.Printf("error reading config file=[%s] %+v\n", configPath, err) - return err + return "", fmt.Errorf("error reading %s err=%w", configPath, err) } err = DecodeConfig(contents) if err != nil { fmt.Printf("error unmarshalling config file=[%s] %+v\n", configPath, err) - return err + return "", err } - return nil + + for _, pat := range Config.PollerFiles { + fs, err := filepath.Glob(pat) + if err != nil { + return "", fmt.Errorf("error retrieving poller_files path=%s err=%w", pat, err) + } + + sort.Strings(fs) + + if len(fs) == 0 { + fmt.Printf("add 0 poller(s) from poller_file=%s because no matching paths\n", pat) + continue + } + + for _, filename := range fs { + fsContents, err := os.ReadFile(filename) + if err != nil { + return "", fmt.Errorf("error reading poller_file=%s err=%w", filename, err) + } + cfg, err := unmarshalConfig(fsContents) + if err != nil { + return "", fmt.Errorf("error unmarshalling poller_file=%s err=%w", filename, err) + } + for _, pName := range cfg.PollersOrdered { + _, ok := Config.Pollers[pName] + if ok { + duplicates = append(duplicates, fmt.Errorf("poller name=%s from poller_file=%s is not unique", pName, filename)) + continue + } + Config.Pollers[pName] = cfg.Pollers[pName] + Config.PollersOrdered = append(Config.PollersOrdered, pName) + } + fmt.Printf("add %d poller(s) from poller_file=%s\n", len(cfg.PollersOrdered), filename) + } + } + + if len(duplicates) > 0 { + return "", errors.Join(duplicates...) 
+ } + + // Fix promIndex for combined pollers + for i, name := range Config.PollersOrdered { + Config.Pollers[name].promIndex = i + } + return configPath, nil } -func DecodeConfig(contents []byte) error { - err := yaml.Unmarshal(contents, &Config) - configRead = true +func unmarshalConfig(contents []byte) (*HarvestConfig, error) { + var ( + cfg HarvestConfig + orderedConfig OrderedConfig + err error + ) + + err = yaml.Unmarshal(contents, &cfg) if err != nil { - return fmt.Errorf("error unmarshalling config err: %w", err) + return nil, fmt.Errorf("error unmarshalling config: %w", err) } - // Until https://github.com/go-yaml/yaml/issues/717 is fixed - // read the yaml again to determine poller order - orderedConfig := OrderedConfig{} + + // Read the yaml again to determine poller order err = yaml.Unmarshal(contents, &orderedConfig) if err != nil { - return err + return nil, fmt.Errorf("error unmarshalling ordered config: %w", err) } - Config.PollersOrdered = orderedConfig.Pollers.namesInOrder + cfg.PollersOrdered = orderedConfig.Pollers.namesInOrder for i, name := range Config.PollersOrdered { Config.Pollers[name].promIndex = i } + return &cfg, nil +} + +func DecodeConfig(contents []byte) error { + cfg, err := unmarshalConfig(contents) + configRead = true + if err != nil { + return fmt.Errorf("error unmarshalling config err: %w", err) + } + Config = *cfg + // Merge pollers and defaults pollers := Config.Pollers defaults := Config.Defaults @@ -172,7 +240,7 @@ func PollerNamed(name string) (*Poller, error) { // The final path will be relative to the HARVEST_CONF environment variable // or ./ when the environment variable is not set func Path(elem ...string) string { - home := os.Getenv("HARVEST_CONF") + home := os.Getenv(HomeEnvVar) paths := append([]string{home}, elem...) return filepath.Join(paths...) } @@ -277,23 +345,23 @@ func (i *IntRange) UnmarshalYAML(node *yaml.Node) error { if node.Kind == yaml.ScalarNode && node.ShortTag() == "!!str" { matches := rangeRegex.FindStringSubmatch(node.Value) if len(matches) == 3 { - min, err1 := strconv.Atoi(matches[1]) - max, err2 := strconv.Atoi(matches[2]) + minVal, err1 := strconv.Atoi(matches[1]) + maxVal, err2 := strconv.Atoi(matches[2]) if err1 != nil { return err1 } if err2 != nil { return err2 } - i.Min = min - i.Max = max + i.Min = minVal + i.Max = maxVal } } return nil } -// GetUniqueExporters returns the unique set of exporter types from the list of export names -// For example: If 2 prometheus exporters are configured for a poller, the last one is returned +// GetUniqueExporters returns the unique set of exporter types from the list of export names. 
+// For example, if two prometheus exporters are configured for a poller, the last one is returned func GetUniqueExporters(exporterNames []string) []string { var resultExporters []string definedExporters := Config.Exporters @@ -378,6 +446,7 @@ type Poller struct { UseInsecureTLS *bool `yaml:"use_insecure_tls,omitempty"` Username string `yaml:"username,omitempty"` PreferZAPI bool `yaml:"prefer_zapi,omitempty"` + ConfPath string `yaml:"conf_path,omitempty"` promIndex int Name string } @@ -487,6 +556,9 @@ func ZapiPoller(n *node.Node) *Poller { names := logSet.GetAllChildNamesS() p.LogSet = &names } + if confPath := n.GetChildContentS("conf_path"); confPath != "" { + p.ConfPath = confPath + } return &p } @@ -567,6 +639,7 @@ type HarvestConfig struct { Tools *Tools `yaml:"Tools,omitempty"` Exporters map[string]Exporter `yaml:"Exporters,omitempty"` Pollers map[string]*Poller `yaml:"Pollers,omitempty"` + PollerFiles []string `yaml:"Poller_files,omitempty"` Defaults *Poller `yaml:"Defaults,omitempty"` Admin Admin `yaml:"Admin,omitempty"` PollersOrdered []string // poller names in same order as yaml config diff --git a/pkg/conf/conf_test.go b/pkg/conf/conf_test.go index a003a1b66..51d30b234 100644 --- a/pkg/conf/conf_test.go +++ b/pkg/conf/conf_test.go @@ -5,6 +5,7 @@ import ( "reflect" "sort" "strconv" + "strings" "testing" ) @@ -281,3 +282,77 @@ func TestNodeToPoller(t *testing.T) { testArg(t, "30s", poller.ClientTimeout) testArg(t, "true", strconv.FormatBool(*poller.UseInsecureTLS)) } + +func TestReadHarvestConfigFromEnv(t *testing.T) { + t.Helper() + resetConfig() + t.Setenv(HomeEnvVar, "testdata") + cp, err := LoadHarvestConfig(HarvestYML) + if err != nil { + t.Errorf("Failed to load config at=[%s] err=%+v\n", HarvestYML, err) + return + } + wantCp := "testdata/harvest.yml" + if cp != wantCp { + t.Errorf("configPath got=%s want=%s", cp, wantCp) + } + poller := Config.Pollers["star"] + if poller == nil { + t.Errorf("check if star poller exists. 
got=nil want=poller") + } +} + +func resetConfig() { + configRead = false + Config = HarvestConfig{} +} + +func TestMultiplePollerFiles(t *testing.T) { + t.Helper() + resetConfig() + _, err := LoadHarvestConfig("testdata/pollerFiles/harvest.yml") + + wantNumErrs := 2 + numErrs := strings.Count(err.Error(), "\n") + 1 + if numErrs != wantNumErrs { + t.Errorf("got %d errors, want %d", numErrs, wantNumErrs) + } + + wantNumPollers := 10 + if len(Config.Pollers) != wantNumPollers { + t.Errorf("got %d pollers, want %d", len(Config.Pollers), wantNumPollers) + } + + if len(Config.PollersOrdered) != wantNumPollers { + t.Errorf("got %d ordered pollers, want %d", len(Config.PollersOrdered), wantNumPollers) + } + + wantToken := "token" + if Config.Tools.GrafanaAPIToken != wantToken { + t.Errorf("got token=%s, want token=%s", Config.Tools.GrafanaAPIToken, wantToken) + } + + orderWanted := []string{ + "star", + "netapp1", + "netapp2", + "netapp3", + "netapp4", + "netapp5", + "netapp6", + "netapp7", + "netapp8", + "moon", + } + + for i, n := range orderWanted { + named, err := PollerNamed(n) + if err != nil { + t.Errorf("got no poller, want poller named=%s", n) + continue + } + if named.promIndex != i { + t.Errorf("got promIndex=%d, want promIndex=%d", named.promIndex, i) + } + } +} diff --git a/pkg/conf/testdata/harvest.yml b/pkg/conf/testdata/harvest.yml new file mode 100644 index 000000000..715be42e1 --- /dev/null +++ b/pkg/conf/testdata/harvest.yml @@ -0,0 +1,6 @@ + +Pollers: + star: + addr: localhost + collectors: + - Simple diff --git a/pkg/conf/testdata/pollerFiles/dup.yml b/pkg/conf/testdata/pollerFiles/dup.yml new file mode 100644 index 000000000..7c1c5be71 --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/dup.yml @@ -0,0 +1,4 @@ + +Pollers: + star: + addr: localhost diff --git a/pkg/conf/testdata/pollerFiles/harvest.yml b/pkg/conf/testdata/pollerFiles/harvest.yml new file mode 100644 index 000000000..8e744cc64 --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/harvest.yml @@ -0,0 +1,16 @@ +Tools: + grafana_api_token: token + +Poller_files: + - testdata/pollerFiles/many/*.yml + - testdata/pollerFiles/single.yml + - testdata/pollerFiles/missing1.yml + - testdata/pollerFiles/missing2.yml + - testdata/pollerFiles/single.yml # will cause duplicate because it is listed twice + - testdata/pollerFiles/dup.yml # will cause duplicate because it contains star again + +Pollers: + star: + addr: localhost + collectors: + - Simple diff --git a/pkg/conf/testdata/pollerFiles/many/00.yml b/pkg/conf/testdata/pollerFiles/many/00.yml new file mode 100644 index 000000000..06761669f --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/many/00.yml @@ -0,0 +1,16 @@ +Pollers: + netapp1: + datacenter: rtp + addr: 1.1.1.1 + netapp2: + datacenter: rtp + addr: 1.1.1.2 + netapp3: + datacenter: rtp + addr: 1.1.1.3 + netapp4: + datacenter: rtp + addr: 1.1.1.4 + +Tools: + grafana_api_token: ignore diff --git a/pkg/conf/testdata/pollerFiles/many/b.yml b/pkg/conf/testdata/pollerFiles/many/b.yml new file mode 100644 index 000000000..08a8bae2d --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/many/b.yml @@ -0,0 +1,13 @@ +Pollers: + netapp5: + datacenter: blr + addr: 1.1.1.5 + netapp6: + datacenter: blr + addr: 1.1.1.6 + netapp7: + datacenter: blr + addr: 1.1.1.7 + netapp8: + datacenter: blr + addr: 1.1.1.8 diff --git a/pkg/conf/testdata/pollerFiles/many/nomatch.yaml b/pkg/conf/testdata/pollerFiles/many/nomatch.yaml new file mode 100644 index 000000000..06761669f --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/many/nomatch.yaml @@ -0,0 
+1,16 @@ +Pollers: + netapp1: + datacenter: rtp + addr: 1.1.1.1 + netapp2: + datacenter: rtp + addr: 1.1.1.2 + netapp3: + datacenter: rtp + addr: 1.1.1.3 + netapp4: + datacenter: rtp + addr: 1.1.1.4 + +Tools: + grafana_api_token: ignore diff --git a/pkg/conf/testdata/pollerFiles/single.yml b/pkg/conf/testdata/pollerFiles/single.yml new file mode 100644 index 000000000..fa45e5fab --- /dev/null +++ b/pkg/conf/testdata/pollerFiles/single.yml @@ -0,0 +1,14 @@ + +Poller_files: # these will be ignored since they are in single.yml + - testdata/pollerFiles/many/*.yml + - testdata/pollerFiles/single.yml + - testdata/pollerFiles/missing1.yml + - testdata/pollerFiles/missing2.yml + - testdata/pollerFiles/missing3.yml + - testdata/pollerFiles/single.yml + +Pollers: + moon: + addr: localhost + collectors: + - Simple diff --git a/pkg/dict/dict.go b/pkg/dict/dict.go index 5cc9aad3a..b66f6fbec 100644 --- a/pkg/dict/dict.go +++ b/pkg/dict/dict.go @@ -4,135 +4,20 @@ package dict -import ( - "reflect" - "slices" - "strings" -) +import "strings" -type Dict struct { - dict map[string]string -} - -func New() *Dict { - d := Dict{} - d.dict = make(map[string]string) - return &d -} - -func (d *Dict) Copy(labels ...string) *Dict { - c := &Dict{} - if len(labels) == 0 { - c.dict = make(map[string]string, len(d.dict)) - for k, v := range d.dict { - c.dict[k] = v - } - } else { - c.dict = make(map[string]string, len(labels)) - for _, k := range labels { - if v, ok := d.dict[k]; ok { - c.dict[k] = v - } - } - } - return c -} - -func (d *Dict) Set(key, val string) { - d.dict[key] = val -} - -// SetAll sets all global labels that do not already exist -func (d *Dict) SetAll(allKeyVals *Dict) { - if allKeyVals != nil { - for key, val := range allKeyVals.dict { - if _, has := d.dict[key]; !has { - d.dict[key] = val - } - } - } -} - -func (d *Dict) Delete(key string) { - delete(d.dict, key) -} - -func (d *Dict) Get(key string) string { - if value, has := d.dict[key]; has { - return value - } - return "" -} - -func (d *Dict) Pop(key string) string { - if value, has := d.GetHas(key); has { - d.Delete(key) - return value +func String(m map[string]string) string { + b := strings.Builder{} + for k, v := range m { + b.WriteString(k) + b.WriteString("=") + b.WriteString(v) + b.WriteString(", ") } - return "" -} - -func (d *Dict) GetHas(key string) (string, bool) { - value, has := d.dict[key] - return value, has -} - -func (d *Dict) Has(key string) bool { - _, has := d.dict[key] - return has -} - -func (d *Dict) Iter() map[string]string { - return d.dict -} - -func (d *Dict) Map() map[string]string { - return d.dict -} - -func (d *Dict) Keys() []string { - keys := make([]string, 0, len(d.dict)) - for k := range d.dict { - keys = append(keys, k) - } - return keys -} - -func (d *Dict) String() string { - s := make([]string, 0, len(d.dict)) - for k, v := range d.dict { - s = append(s, k+"="+v) - } - return strings.Join(s, ", ") -} - -func (d *Dict) Values() []string { - values := make([]string, 0, len(d.dict)) - for _, v := range d.dict { - values = append(values, v) - } - return values -} - -func (d *Dict) IsEmpty() bool { - return len(d.dict) == 0 -} - -func (d *Dict) Size() int { - return len(d.dict) -} -// CompareLabels The function compares the labels in the current Dict with the previous Dict. -// returns current and previous value of a label if values are different. 
label should exist in []labels -func (d *Dict) CompareLabels(prev *Dict, labels []string) (*Dict, *Dict) { - cur := New() - old := New() - for key, val1 := range d.dict { - val2, ok := prev.dict[key] - if slices.Contains(labels, key) && (!ok || !reflect.DeepEqual(val1, val2)) { - cur.dict[key] = val1 - old.dict[key] = val2 - } + s := b.String() + if len(s) > 0 { + return s[:len(s)-2] } - return cur, old + return s } diff --git a/pkg/dict/dict_test.go b/pkg/dict/dict_test.go new file mode 100644 index 000000000..d13010e2c --- /dev/null +++ b/pkg/dict/dict_test.go @@ -0,0 +1,28 @@ +package dict + +import ( + "strings" + "testing" +) + +func TestString(t *testing.T) { + tests := []struct { + name string + args map[string]string + wantCommas int + }{ + {name: "empty", args: make(map[string]string), wantCommas: 0}, + {name: "none", args: map[string]string{"a": "a"}, wantCommas: 0}, + {name: "one", args: map[string]string{"a": "a", "b": "b"}, wantCommas: 1}, + {name: "two", args: map[string]string{"a": "a", "b": "b", "c": "c"}, wantCommas: 2}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := String(tt.args) + gotCommas := strings.Count(s, ",") + if gotCommas != tt.wantCommas { + t.Errorf("String() commas got=%d, want=%d", gotCommas, tt.wantCommas) + } + }) + } +} diff --git a/pkg/matrix/instance.go b/pkg/matrix/instance.go index 16f5ab0a7..78c57ba08 100644 --- a/pkg/matrix/instance.go +++ b/pkg/matrix/instance.go @@ -5,41 +5,41 @@ package matrix import ( - "github.com/netapp/harvest/v2/pkg/dict" + "maps" ) // Instance struct and related methods type Instance struct { index int - labels *dict.Dict + labels map[string]string exportable bool } func NewInstance(index int) *Instance { me := &Instance{index: index} - me.labels = dict.New() + me.labels = make(map[string]string) me.exportable = true return me } func (i *Instance) GetLabel(key string) string { - return i.labels.Get(key) + return i.labels[key] } -func (i *Instance) GetLabels() *dict.Dict { +func (i *Instance) GetLabels() map[string]string { return i.labels } func (i *Instance) ClearLabels() { - i.labels = dict.New() + clear(i.labels) } func (i *Instance) SetLabel(key, value string) { - i.labels.Set(key, value) + i.labels[key] = value } -func (i *Instance) SetLabels(labels *dict.Dict) { +func (i *Instance) SetLabels(labels map[string]string) { i.labels = labels } @@ -51,9 +51,9 @@ func (i *Instance) SetExportable(b bool) { i.exportable = b } -func (i *Instance) Clone(isExportable bool, labels ...string) *Instance { +func (i *Instance) Clone(isExportable bool) *Instance { clone := NewInstance(i.index) - clone.labels = i.labels.Copy(labels...) 
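Because Go randomizes map iteration, the `", "`-joined output of the new `dict.String` has no stable order, which is why `TestString` counts separators instead of comparing whole strings. A self-contained sketch (the `String` body is copied from the new `pkg/dict`):

```go
package main

import (
	"fmt"
	"strings"
)

// String mirrors the new pkg/dict implementation: join k=v pairs with
// ", " and trim the trailing separator.
func String(m map[string]string) string {
	b := strings.Builder{}
	for k, v := range m {
		b.WriteString(k)
		b.WriteString("=")
		b.WriteString(v)
		b.WriteString(", ")
	}
	s := b.String()
	if len(s) > 0 {
		return s[:len(s)-2]
	}
	return s
}

func main() {
	labels := map[string]string{"a": "1", "b": "2", "c": "3"}
	s := String(labels)
	// Map iteration order is randomized, so "a=1, b=2, c=3" and
	// "c=3, a=1, b=2" are both valid outputs; counting separators is
	// the order-independent assertion.
	fmt.Println(s, strings.Count(s, ",")) // <some order> 2
}
```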
+ clone.labels = maps.Clone(i.labels) clone.exportable = isExportable return clone } diff --git a/pkg/matrix/matrix.go b/pkg/matrix/matrix.go index cece259eb..608f865c0 100644 --- a/pkg/matrix/matrix.go +++ b/pkg/matrix/matrix.go @@ -12,7 +12,6 @@ package matrix import ( "fmt" - "github.com/netapp/harvest/v2/pkg/dict" "github.com/netapp/harvest/v2/pkg/errs" "github.com/netapp/harvest/v2/pkg/logging" "github.com/netapp/harvest/v2/pkg/tree/node" @@ -23,7 +22,7 @@ type Matrix struct { UUID string Object string Identifier string - globalLabels *dict.Dict + globalLabels map[string]string instances map[string]*Instance metrics map[string]*Metric // ONTAP metric name => metric (in templates, this is left side) displayMetrics map[string]string // display name of metric to => metric name (in templates, this is right side) @@ -41,7 +40,7 @@ type With struct { func New(uuid, object string, identifier string) *Matrix { me := Matrix{UUID: uuid, Object: object, Identifier: identifier} - me.globalLabels = dict.New() + me.globalLabels = make(map[string]string) me.instances = make(map[string]*Instance) me.metrics = make(map[string]*Metric) me.displayMetrics = make(map[string]string) @@ -288,15 +287,22 @@ func (m *Matrix) RemoveInstance(key string) { } func (m *Matrix) SetGlobalLabel(label, value string) { - m.globalLabels.Set(label, value) + m.globalLabels[label] = value } -// SetGlobalLabels sets all global labels that do not already exist -func (m *Matrix) SetGlobalLabels(allLabels *dict.Dict) { - m.globalLabels.SetAll(allLabels) +// SetGlobalLabels copies allLabels to globalLabels when the label does not exist in globalLabels +func (m *Matrix) SetGlobalLabels(allLabels map[string]string) { + if allLabels == nil { + return + } + for key, val := range allLabels { + if _, has := m.globalLabels[key]; !has { + m.globalLabels[key] = val + } + } } -func (m *Matrix) GetGlobalLabels() *dict.Dict { +func (m *Matrix) GetGlobalLabels() map[string]string { return m.globalLabels } diff --git a/pkg/matrix/metric.go b/pkg/matrix/metric.go index ed548d99c..60eacb643 100644 --- a/pkg/matrix/metric.go +++ b/pkg/matrix/metric.go @@ -13,7 +13,7 @@ package matrix import ( "fmt" - "github.com/netapp/harvest/v2/pkg/dict" + "maps" "strconv" ) @@ -25,7 +25,7 @@ type Metric struct { array bool histogram bool exportable bool - labels *dict.Dict + labels map[string]string buckets *[]string record []bool values []float64 @@ -42,9 +42,7 @@ func (m *Metric) Clone(deep bool) *Metric { histogram: m.histogram, buckets: m.buckets, } - if m.labels != nil { - clone.labels = m.labels.Copy() - } + clone.labels = maps.Clone(m.labels) if deep { if len(m.record) != 0 { clone.record = make([]bool, len(m.record)) @@ -100,9 +98,9 @@ func (m *Metric) SetArray(c bool) { func (m *Metric) SetLabel(key, value string) { if m.labels == nil { - m.labels = dict.New() + m.labels = make(map[string]string) } - m.labels.Set(key, value) + m.labels[key] = value } func (m *Metric) SetHistogram(b bool) { @@ -121,23 +119,23 @@ func (m *Metric) SetBuckets(buckets *[]string) { m.buckets = buckets } -func (m *Metric) SetLabels(labels *dict.Dict) { +func (m *Metric) SetLabels(labels map[string]string) { m.labels = labels } func (m *Metric) GetLabel(key string) string { if m.labels != nil { - return m.labels.Get(key) + return m.labels[key] } return "" } -func (m *Metric) GetLabels() *dict.Dict { +func (m *Metric) GetLabels() map[string]string { return m.labels } func (m *Metric) HasLabels() bool { - return m.labels != nil && m.labels.Size() != 0 + return m.labels != nil 
&& len(m.labels) > 0 } func (m *Metric) GetRecords() []bool { diff --git a/prom-stack.tmpl b/prom-stack.tmpl index 4ae661924..7d917fb3c 100644 --- a/prom-stack.tmpl +++ b/prom-stack.tmpl @@ -26,7 +26,7 @@ services: - '--web.console.templates=/usr/share/prometheus/consoles' # - '--web.enable-admin-api' # Enable to delete time series data from Prometheus see https://www.robustperception.io/deleting-time-series-from-prometheus ports: - - {{ .PromPort }}:9090 + - "{{ .PromPort }}:9090" networks: - backend restart: unless-stopped @@ -39,7 +39,7 @@ services: depends_on: - prometheus ports: - - {{ .GrafanaPort }}:3000 + - "{{ .GrafanaPort }}:3000" volumes: - grafana_data:/var/lib/grafana - ./grafana:/etc/grafana/provisioning # import Harvest dashboards diff --git a/vendor/github.com/go-ole/go-ole/SECURITY.md b/vendor/github.com/go-ole/go-ole/SECURITY.md new file mode 100644 index 000000000..dac281523 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/go-ole/go-ole/security/advisories/new). + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml index 0d557ac2f..8df7fa26e 100644 --- a/vendor/github.com/go-ole/go-ole/appveyor.yml +++ b/vendor/github.com/go-ole/go-ole/appveyor.yml @@ -6,14 +6,9 @@ version: "1.3.0.{build}-alpha-{branch}" -os: Windows Server 2012 R2 +os: Visual Studio 2019 -branches: - only: - - master - - v1.2 - - v1.1 - - v1.0 +build: off skip_tags: true @@ -21,20 +16,40 @@ clone_folder: c:\gopath\src\github.com\go-ole\go-ole environment: GOPATH: c:\gopath - matrix: - - GOARCH: amd64 - GOVERSION: 1.5 - GOROOT: c:\go - DOWNLOADPLATFORM: "x64" + GOROOT: c:\go + DOWNLOADPLATFORM: "x64" -install: - - choco install mingw - - SET PATH=c:\tools\mingw64\bin;%PATH% +before_test: # - Download COM Server - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat - # - set + +test_script: + - go test -v -cover ./... + # go vet has false positives on unsafe.Pointer with windows/sys. Disabling since it is recommended to use go test instead. + # - go vet ./... + +branches: + only: + - master + - v1.2 + - v1.1 + - v1.0 + +matrix: + allow_failures: + - environment: + GOROOT: C:\go-x86 + DOWNLOADPLATFORM: "x86" + - environment: + GOROOT: C:\go118 + DOWNLOADPLATFORM: "x64" + - environment: + GOROOT: C:\go118-x86 + DOWNLOADPLATFORM: "x86" + +install: - go version - go env - go get -u golang.org/x/tools/cmd/cover @@ -45,10 +60,9 @@ build_script: - cd c:\gopath\src\github.com\go-ole\go-ole - go get -v -t ./... - go build - - go test -v -cover ./... 
# disable automatic tests -test: off +test: on # disable deployment deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go index a9bef150a..cabbac012 100644 --- a/vendor/github.com/go-ole/go-ole/com.go +++ b/vendor/github.com/go-ole/go-ole/com.go @@ -11,6 +11,7 @@ import ( var ( procCoInitialize = modole32.NewProc("CoInitialize") procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoInitializeSecurity = modole32.NewProc("CoInitializeSecurity") procCoUninitialize = modole32.NewProc("CoUninitialize") procCoCreateInstance = modole32.NewProc("CoCreateInstance") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") @@ -37,6 +38,9 @@ var ( procDispatchMessageW = moduser32.NewProc("DispatchMessageW") ) +// bSecurityInit tracks whether COM security has been initialized, so repeated calls are a no-op +var bSecurityInit bool = false + // coInitialize initializes COM library on current thread. // // MSDN documentation suggests that this function should not be called. Call @@ -68,6 +72,35 @@ func coInitializeEx(coinit uint32) (err error) { return } +// coInitializeSecurity registers security and sets the default security values +// for the process. +func coInitializeSecurity(cAuthSvc int32, + dwAuthnLevel uint32, + dwImpLevel uint32, + dwCapabilities uint32) (err error) { + // Skip if COM security initialization has already been done + if !bSecurityInit { + // https://learn.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-coinitializesecurity + hr, _, _ := procCoInitializeSecurity.Call( + uintptr(0), // Allow *all* VSS writers to communicate back! + uintptr(cAuthSvc), // Default COM authentication service + uintptr(0), // Default COM authorization service + uintptr(0), // Reserved parameter + uintptr(dwAuthnLevel), // Strongest COM authentication level + uintptr(dwImpLevel), // Minimal impersonation abilities + uintptr(0), // Default COM authentication settings + uintptr(dwCapabilities), // Cloaking + uintptr(0)) // Reserved parameter + if hr != 0 { + err = NewError(hr) + } else { + // COM security initialization succeeded; record it so it is not repeated + bSecurityInit = true + } + } + return +} + // CoInitialize initializes COM library on current thread. // // MSDN documentation suggests that this function should not be called. Call @@ -96,6 +129,15 @@ func CoUninitialize() { procCoUninitialize.Call() } +// CoInitializeSecurity registers security and sets the default security values +// for the process. +func CoInitializeSecurity(cAuthSvc int32, + dwAuthnLevel uint32, + dwImpLevel uint32, + dwCapabilities uint32) (err error) { + return coInitializeSecurity(cAuthSvc, dwAuthnLevel, dwImpLevel, dwCapabilities) +} + // CoTaskMemFree frees memory pointer.
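The exported `CoInitializeSecurity` wrapper above takes raw numeric levels. A windows-only usage sketch follows; the values 6 (`RPC_C_AUTHN_LEVEL_PKT_PRIVACY`) and 3 (`RPC_C_IMP_LEVEL_IMPERSONATE`) are the documented Windows constants and -1 for `cAuthSvc` lets COM choose the authentication services, but go-ole does not export named constants for these, so treat them as assumptions to verify against the MSDN page linked above.

```go
//go:build windows

package main

import (
	ole "github.com/go-ole/go-ole"
)

func main() {
	// Initialize COM on this thread before touching security settings.
	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		panic(err)
	}
	defer ole.CoUninitialize()

	// -1: let COM pick the authentication services; 6 and 3 are the MSDN
	// values for RPC_C_AUTHN_LEVEL_PKT_PRIVACY and RPC_C_IMP_LEVEL_IMPERSONATE.
	// The bSecurityInit guard makes a second call here a harmless no-op.
	if err := ole.CoInitializeSecurity(-1, 6, 3, 0); err != nil {
		panic(err)
	}
}
```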
func CoTaskMemFree(memptr uintptr) { procCoTaskMemFree.Call(memptr) diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go index b399f0479..649c0734f 100644 --- a/vendor/github.com/go-ole/go-ole/idispatch_windows.go +++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ole @@ -92,7 +93,7 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{} case int8: vargs[n] = NewVariant(VT_I1, int64(v.(int8))) case *int8: - vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int8))))) case int16: vargs[n] = NewVariant(VT_I2, int64(v.(int16))) case *int16: diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go index 967a23fea..a2c8402f7 100644 --- a/vendor/github.com/go-ole/go-ole/variant.go +++ b/vendor/github.com/go-ole/go-ole/variant.go @@ -99,7 +99,7 @@ func (v *VARIANT) Value() interface{} { case VT_DISPATCH: return v.ToIDispatch() case VT_BOOL: - return v.Val != 0 + return (v.Val & 0xffff) != 0 } return nil } diff --git a/vendor/github.com/rs/zerolog/README.md b/vendor/github.com/rs/zerolog/README.md index b83ae159d..972b729fb 100644 --- a/vendor/github.com/rs/zerolog/README.md +++ b/vendor/github.com/rs/zerolog/README.md @@ -1,6 +1,6 @@ # Zero Allocation JSON Logger -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog) +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://github.com/rs/zerolog/actions/workflows/test.yml/badge.svg)](https://github.com/rs/zerolog/actions/workflows/test.yml) [![Go Coverage](https://github.com/rs/zerolog/wiki/coverage.svg)](https://raw.githack.com/wiki/rs/zerolog/coverage.html) The zerolog package provides a fast and simple logger dedicated to JSON output. @@ -499,7 +499,7 @@ log.Ctx(ctx).Info().Msg("hello world") ### Set as standard logger output ```go -stdlog := zerolog.New(os.Stdout).With(). +log := zerolog.New(os.Stdout).With(). Str("foo", "bar"). Logger() @@ -694,7 +694,7 @@ with zerolog library is [CSD](https://github.com/toravir/csd/). ## Benchmarks -See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks. +See [logbench](http://bench.zerolog.io/) for more comprehensive and up-to-date benchmarks. All operations are allocation free (those numbers *include* JSON encoding): diff --git a/vendor/github.com/rs/zerolog/console.go b/vendor/github.com/rs/zerolog/console.go index 8b0e0c619..282798853 100644 --- a/vendor/github.com/rs/zerolog/console.go +++ b/vendor/github.com/rs/zerolog/console.go @@ -312,6 +312,11 @@ func needsQuote(s string) bool { // colorize returns the string s wrapped in ANSI code c, unless disabled is true. 
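The `colorize` change just below honors the NO_COLOR convention (https://no-color.org), and together with the newly exported `LevelWriterAdapter`, the `Context.Any` wrapper, and the `FilteredLevelWriter` added further down, the pieces compose like this (a sketch, not zerolog documentation):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// With the colorize change, any non-empty NO_COLOR disables ANSI
	// colors in ConsoleWriter output.
	os.Setenv("NO_COLOR", "1")

	console := zerolog.ConsoleWriter{Out: os.Stderr}

	// FilteredLevelWriter (added further below) forwards only Error and
	// above to stdout, while the console writer still sees every event.
	errOnly := &zerolog.FilteredLevelWriter{
		Writer: zerolog.LevelWriterAdapter{Writer: os.Stdout},
		Level:  zerolog.ErrorLevel,
	}

	log := zerolog.New(zerolog.MultiLevelWriter(console, errOnly)).
		With().Timestamp().Any("svc", "demo").Logger()

	log.Info().Msg("starting") // console only
	log.Error().Msg("boom")    // console + stdout
}
```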
func colorize(s interface{}, c int, disabled bool) string { + e := os.Getenv("NO_COLOR") + if e != "" { + disabled = true + } + if disabled { return fmt.Sprintf("%s", s) } diff --git a/vendor/github.com/rs/zerolog/context.go b/vendor/github.com/rs/zerolog/context.go index 9d860e507..fc62ad9c1 100644 --- a/vendor/github.com/rs/zerolog/context.go +++ b/vendor/github.com/rs/zerolog/context.go @@ -57,7 +57,7 @@ func (c Context) Array(key string, arr LogArrayMarshaler) Context { // Object marshals an object that implement the LogObjectMarshaler interface. func (c Context) Object(key string, obj LogObjectMarshaler) Context { - e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) + e := newEvent(LevelWriterAdapter{ioutil.Discard}, 0) e.Object(key, obj) c.l.context = enc.AppendObjectData(c.l.context, e.buf) putEvent(e) @@ -66,7 +66,7 @@ func (c Context) Object(key string, obj LogObjectMarshaler) Context { // EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface. func (c Context) EmbedObject(obj LogObjectMarshaler) Context { - e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) + e := newEvent(LevelWriterAdapter{ioutil.Discard}, 0) e.EmbedObject(obj) c.l.context = enc.AppendObjectData(c.l.context, e.buf) putEvent(e) @@ -379,6 +379,11 @@ func (c Context) Interface(key string, i interface{}) Context { return c } +// Any is a wrapper around Context.Interface. +func (c Context) Any(key string, i interface{}) Context { + return c.Interface(key, i) +} + type callerHook struct { callerSkipFrameCount int } diff --git a/vendor/github.com/rs/zerolog/log.go b/vendor/github.com/rs/zerolog/log.go index e7b5126e9..834c7e604 100644 --- a/vendor/github.com/rs/zerolog/log.go +++ b/vendor/github.com/rs/zerolog/log.go @@ -250,7 +250,7 @@ func New(w io.Writer) Logger { } lw, ok := w.(LevelWriter) if !ok { - lw = levelWriterAdapter{w} + lw = LevelWriterAdapter{w} } return Logger{w: lw, level: TraceLevel} } diff --git a/vendor/github.com/rs/zerolog/writer.go b/vendor/github.com/rs/zerolog/writer.go index 26f5e6325..9b9ef88e8 100644 --- a/vendor/github.com/rs/zerolog/writer.go +++ b/vendor/github.com/rs/zerolog/writer.go @@ -17,11 +17,13 @@ type LevelWriter interface { WriteLevel(level Level, p []byte) (n int, err error) } -type levelWriterAdapter struct { +// LevelWriterAdapter adapts an io.Writer to support the LevelWriter interface. +type LevelWriterAdapter struct { io.Writer } -func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { +// WriteLevel simply writes everything to the adapted writer, ignoring the level. +func (lw LevelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { return lw.Write(p) } @@ -38,7 +40,7 @@ func SyncWriter(w io.Writer) io.Writer { if lw, ok := w.(LevelWriter); ok { return &syncWriter{lw: lw} } - return &syncWriter{lw: levelWriterAdapter{w}} + return &syncWriter{lw: LevelWriterAdapter{w}} } // Write implements the io.Writer interface. @@ -96,7 +98,7 @@ func MultiLevelWriter(writers ...io.Writer) LevelWriter { if lw, ok := w.(LevelWriter); ok { lwriters = append(lwriters, lw) } else { - lwriters = append(lwriters, levelWriterAdapter{w}) + lwriters = append(lwriters, LevelWriterAdapter{w}) } } return multiLevelWriter{lwriters} @@ -152,3 +154,29 @@ func ConsoleTestWriter(t TestingLog) func(w *ConsoleWriter) { w.Out = TestWriter{T: t, Frame: 6} } } + +// FilteredLevelWriter writes only logs at Level or above to Writer. 
+// +// It should be used only in combination with MultiLevelWriter when you +// want to write to multiple destinations at different levels. Otherwise +// you should just set the level on the logger and filter events early. +// When using MultiLevelWriter then you set the level on the logger to +// the lowest of the levels you use for writers. +type FilteredLevelWriter struct { + Writer LevelWriter + Level Level +} + +// Write writes to the underlying Writer. +func (w *FilteredLevelWriter) Write(p []byte) (int, error) { + return w.Writer.Write(p) +} + +// WriteLevel calls WriteLevel of the underlying Writer only if the level is equal +// or above the Level. +func (w *FilteredLevelWriter) WriteLevel(level Level, p []byte) (int, error) { + if level >= w.Level { + return w.Writer.WriteLevel(level, p) + } + return len(p), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go index 6d7007ff9..089f603c8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go @@ -1,5 +1,5 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !dragonfly && !plan9 && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!dragonfly,!plan9,!aix +//go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !dragonfly && !plan9 && !aix +// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows,!dragonfly,!plan9,!aix package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go index 1b64241ce..b5a20e366 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go @@ -259,8 +259,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { } case "Model Name", "model name", "cpu": c.ModelName = value - if strings.Contains(value, "POWER8") || - strings.Contains(value, "POWER7") { + if strings.Contains(value, "POWER") { c.Model = strings.Split(value, " ")[0] c.Family = "POWER" c.VendorID = "IBM" diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go new file mode 100644 index 000000000..1f66be342 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go @@ -0,0 +1,119 @@ +//go:build netbsd +// +build netbsd + +package cpu + +import ( + "context" + "fmt" + "runtime" + "unsafe" + + "github.com/shirou/gopsutil/v3/internal/common" + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" +) + +const ( + // sys/sysctl.h + ctlKern = 1 // "high kernel": proc, limits + ctlHw = 6 // CTL_HW + kernCpTime = 51 // KERN_CPTIME +) + +var ClocksPerSec = float64(100) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err error) { + if !percpu { + mib := []int32{ctlKern, kernCpTime} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + times := (*cpuTimes)(unsafe.Pointer(&buf[0])) + stat := TimesStat{ + CPU: "cpu-total", + User: float64(times.User), + Nice: float64(times.Nice), + System: float64(times.Sys), + Idle: float64(times.Idle), + Irq: float64(times.Intr), + 
} + return []TimesStat{stat}, nil + } + + ncpu, err := unix.SysctlUint32("hw.ncpu") + if err != nil { + return + } + + var i uint32 + for i = 0; i < ncpu; i++ { + mib := []int32{ctlKern, kernCpTime, int32(i)} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + + stats := (*cpuTimes)(unsafe.Pointer(&buf[0])) + ret = append(ret, TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(stats.User), + Nice: float64(stats.Nice), + System: float64(stats.Sys), + Idle: float64(stats.Idle), + Irq: float64(stats.Intr), + }) + } + + return ret, nil +} + +// Returns only one (minimal) CPUInfoStat on NetBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + var err error + + c := InfoStat{} + + mhz, err := unix.Sysctl("machdep.dmi.processor-frequency") + if err != nil { + return nil, err + } + _, err = fmt.Sscanf(mhz, "%f", &c.Mhz) + if err != nil { + return nil, err + } + + ncpu, err := unix.SysctlUint32("hw.ncpuonline") + if err != nil { + return nil, err + } + c.Cores = int32(ncpu) + + if c.ModelName, err = unix.Sysctl("machdep.dmi.processor-version"); err != nil { + return nil, err + } + + return append(ret, c), nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go new file mode 100644 index 000000000..57e14528d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go @@ -0,0 +1,9 @@ +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go new file mode 100644 index 000000000..57e14528d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,9 @@ +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go b/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go index 4dc2bba58..67ae900bc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_bsd.go @@ -1,5 +1,5 @@ -//go:build darwin || freebsd || openbsd -// +build darwin freebsd openbsd +//go:build darwin || freebsd || openbsd || netbsd +// +build darwin freebsd openbsd netbsd package host diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go b/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go index 1be2e8533..f045d4f17 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_darwin.go @@ -8,7 +8,7 @@ import ( "context" "encoding/binary" "errors" - "io/ioutil" + "io" "os" "strings" "unsafe" @@ -59,7 +59,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return ret, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go b/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go index 585250f9a..a393ca15d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_fallback.go @@ 
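With the new `cpu_netbsd*.go` files, the portable gopsutil API stops returning "not implemented" on NetBSD; callers use the same entry points on every supported platform. A small usage sketch:

```go
package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v3/cpu"
)

func main() {
	times, err := cpu.Times(true) // per-CPU, via KERN_CPTIME on NetBSD
	if err != nil {
		fmt.Println("cpu times:", err)
		return
	}
	for _, t := range times {
		fmt.Printf("%s user=%.0f sys=%.0f idle=%.0f\n",
			t.CPU, t.User, t.System, t.Idle)
	}

	info, err := cpu.Info() // one minimal InfoStat on NetBSD
	if err == nil && len(info) > 0 {
		fmt.Println(info[0].ModelName, info[0].Cores)
	}
}
```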
-1,5 +1,5 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows +//go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows +// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows package host diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go index 2c9aa9d0d..9a5382d39 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_freebsd.go @@ -7,7 +7,7 @@ import ( "bytes" "context" "encoding/binary" - "io/ioutil" + "io" "math" "os" "strings" @@ -54,7 +54,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return ret, err } @@ -111,7 +111,7 @@ func getUsersFromUtmp(utmpfile string) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return ret, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go index 62a4b3ca9..f9d7995e7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_linux.go @@ -8,7 +8,7 @@ import ( "context" "encoding/binary" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "regexp" @@ -91,7 +91,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return nil, err } @@ -138,13 +138,13 @@ func getlsbStruct(ctx context.Context) (*lsbStruct, error) { } switch field[0] { case "DISTRIB_ID": - ret.ID = field[1] + ret.ID = strings.ReplaceAll(field[1], `"`, ``) case "DISTRIB_RELEASE": - ret.Release = field[1] + ret.Release = strings.ReplaceAll(field[1], `"`, ``) case "DISTRIB_CODENAME": - ret.Codename = field[1] + ret.Codename = strings.ReplaceAll(field[1], `"`, ``) case "DISTRIB_DESCRIPTION": - ret.Description = field[1] + ret.Description = strings.ReplaceAll(field[1], `"`, ``) } } } else if common.PathExists("/usr/bin/lsb_release") { @@ -159,13 +159,13 @@ func getlsbStruct(ctx context.Context) (*lsbStruct, error) { } switch field[0] { case "Distributor ID": - ret.ID = field[1] + ret.ID = strings.ReplaceAll(field[1], `"`, ``) case "Release": - ret.Release = field[1] + ret.Release = strings.ReplaceAll(field[1], `"`, ``) case "Codename": - ret.Codename = field[1] + ret.Codename = strings.ReplaceAll(field[1], `"`, ``) case "Description": - ret.Description = field[1] + ret.Description = strings.ReplaceAll(field[1], `"`, ``) } } @@ -411,13 +411,13 @@ func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, err } for _, file := range files { // Get the name of the temperature you are reading - name, err := ioutil.ReadFile(filepath.Join(file, "type")) + name, err := os.ReadFile(filepath.Join(file, "type")) if err != nil { warns.Add(err) continue } // Get the temperature reading - current, err := ioutil.ReadFile(filepath.Join(file, "temp")) + current, err := os.ReadFile(filepath.Join(file, "temp")) if err != nil { warns.Add(err) continue @@ -461,13 +461,13 @@ func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, err // Get the label of the temperature you are reading label := "" - if raw, _ = ioutil.ReadFile(basepath + "_label"); 
len(raw) != 0 { + if raw, _ = os.ReadFile(basepath + "_label"); len(raw) != 0 { // Format the label from "Core 0" to "core_0" label = strings.Join(strings.Split(strings.TrimSpace(strings.ToLower(string(raw))), " "), "_") } // Get the name of the temperature you are reading - if raw, err = ioutil.ReadFile(filepath.Join(directory, "name")); err != nil { + if raw, err = os.ReadFile(filepath.Join(directory, "name")); err != nil { warns.Add(err) continue } @@ -479,7 +479,7 @@ func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, err } // Get the temperature reading - if raw, err = ioutil.ReadFile(file); err != nil { + if raw, err = os.ReadFile(file); err != nil { warns.Add(err) continue } @@ -513,7 +513,7 @@ func optionalValueReadFromFile(filename string) float64 { return 0 } - if raw, err = ioutil.ReadFile(filename); err != nil { + if raw, err = os.ReadFile(filename); err != nil { return 0 } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_netbsd.go b/vendor/github.com/shirou/gopsutil/v3/host/host_netbsd.go new file mode 100644 index 000000000..488f1dfc2 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_netbsd.go @@ -0,0 +1,55 @@ +//go:build netbsd +// +build netbsd + +package host + +import ( + "context" + "strings" + + "github.com/shirou/gopsutil/v3/internal/common" + "golang.org/x/sys/unix" +) + +func HostIDWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func numProcs(ctx context.Context) (uint64, error) { + return 0, common.ErrNotImplementedError +} + +func PlatformInformationWithContext(ctx context.Context) (string, string, string, error) { + platform := "" + family := "" + version := "" + + p, err := unix.Sysctl("kern.ostype") + if err == nil { + platform = strings.ToLower(p) + } + v, err := unix.Sysctl("kern.osrelease") + if err == nil { + version = strings.ToLower(v) + } + + return platform, family, version, nil +} + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + return "", "", common.ErrNotImplementedError +} + +func UsersWithContext(ctx context.Context) ([]UserStat, error) { + var ret []UserStat + return ret, common.ErrNotImplementedError +} + +func SensorsTemperaturesWithContext(ctx context.Context) ([]TemperatureStat, error) { + return []TemperatureStat{}, common.ErrNotImplementedError +} + +func KernelVersionWithContext(ctx context.Context) (string, error) { + _, _, version, err := PlatformInformationWithContext(ctx) + return version, err +} diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go b/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go index 569de4abd..325015c23 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_openbsd.go @@ -7,7 +7,7 @@ import ( "bytes" "context" "encoding/binary" - "io/ioutil" + "io" "os" "strings" "unsafe" @@ -65,7 +65,7 @@ func UsersWithContext(ctx context.Context) ([]UserStat, error) { } defer file.Close() - buf, err := ioutil.ReadAll(file) + buf, err := io.ReadAll(file) if err != nil { return ret, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go b/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go index 24529f19f..e7e0d837f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_posix.go @@ -1,5 +1,5 @@ -//go:build linux || freebsd || openbsd || darwin || solaris -// +build linux freebsd openbsd darwin solaris +//go:build 
linux || freebsd || openbsd || netbsd || darwin || solaris +// +build linux freebsd openbsd netbsd darwin solaris package host diff --git a/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go b/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go index 7d3625acb..fef67f835 100644 --- a/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/host/host_solaris.go @@ -7,7 +7,6 @@ import ( "encoding/csv" "fmt" "io" - "io/ioutil" "os" "regexp" "strconv" @@ -60,7 +59,7 @@ func HostIDWithContext(ctx context.Context) (string, error) { // Count number of processes based on the number of entries in /proc func numProcs(ctx context.Context) (uint64, error) { - dirs, err := ioutil.ReadDir("/proc") + dirs, err := os.ReadDir("/proc") if err != nil { return 0, err } @@ -138,7 +137,7 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { // Find distribution name from /etc/release func parseReleaseFile() (string, error) { - b, err := ioutil.ReadFile("/etc/release") + b, err := os.ReadFile("/etc/release") if err != nil { return "", err } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go index 9bfece362..99ed6a58e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go @@ -14,7 +14,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/url" "os" "os/exec" @@ -87,7 +86,7 @@ func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) { fpath += "_" + i.Suffix } if PathExists(fpath) { - return ioutil.ReadFile(fpath) + return os.ReadFile(fpath) } return []byte{}, fmt.Errorf("could not find testdata: %s", fpath) } @@ -100,7 +99,7 @@ var ErrNotImplementedError = errors.New("not implemented yet") // ReadFile reads contents from a file func ReadFile(filename string) (string, error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return "", err } @@ -114,6 +113,30 @@ func ReadLines(filename string) ([]string, error) { return ReadLinesOffsetN(filename, 0, -1) } +// ReadLine reads a file and returns the first occurrence of a line that is prefixed with prefix. +func ReadLine(filename string, prefix string) (string, error) { + f, err := os.Open(filename) + if err != nil { + return "", err + } + defer f.Close() + r := bufio.NewReader(f) + for { + line, err := r.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return "", err + } + if strings.HasPrefix(line, prefix) { + return line, nil + } + } + + return "", nil +} + // ReadLinesOffsetN reads contents from file and splits them by new line. // The offset tells at which line number to start. // The count determines the number of lines to read (starting from offset): diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go index b58edbeb0..a644687ba 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go @@ -62,17 +62,38 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { return 0, err } - statFile := "stat" + useStatFile := true if system == "lxc" && role == "guest" { // if lxc, /proc/uptime is used. 
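The refactor here splits `BootTimeWithContext` into two paths: hosts read the absolute `btime` line from /proc/stat (now via the new `ReadLine` helper instead of slurping the whole file), while lxc/docker guests derive boot time as now minus the first field of /proc/uptime. A standalone sketch of both computations, not gopsutil's exact code:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"
)

// bootTimeFromStat reads the absolute boot time (seconds since epoch)
// from the "btime" line of /proc/stat.
func bootTimeFromStat() (uint64, error) {
	data, err := os.ReadFile("/proc/stat")
	if err != nil {
		return 0, err
	}
	for _, line := range strings.Split(string(data), "\n") {
		if strings.HasPrefix(line, "btime") {
			f := strings.Fields(line)
			if len(f) != 2 {
				return 0, fmt.Errorf("wrong btime format")
			}
			b, err := strconv.ParseInt(f[1], 10, 64)
			if err != nil {
				return 0, err
			}
			return uint64(b), nil
		}
	}
	return 0, fmt.Errorf("could not find btime")
}

// bootTimeFromUptime derives boot time as now minus uptime; containers
// (lxc/docker guests) take this path.
func bootTimeFromUptime() (uint64, error) {
	data, err := os.ReadFile("/proc/uptime")
	if err != nil {
		return 0, err
	}
	fields := strings.Fields(string(data))
	if len(fields) == 0 {
		return 0, fmt.Errorf("wrong uptime format")
	}
	up, err := strconv.ParseFloat(fields[0], 64)
	if err != nil {
		return 0, err
	}
	now := float64(time.Now().UnixNano()) / float64(time.Second)
	return uint64(now - up), nil
}

func main() {
	b1, _ := bootTimeFromStat()
	b2, _ := bootTimeFromUptime()
	fmt.Println(b1, b2) // should agree to within a second on Linux
}
```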
- statFile = "uptime" + useStatFile = false } else if system == "docker" && role == "guest" { // also docker, guest - statFile = "uptime" + useStatFile = false } - filename := HostProcWithContext(ctx, statFile) + if useStatFile { + return readBootTimeStat(ctx) + } + + filename := HostProcWithContext(ctx, "uptime") lines, err := ReadLines(filename) + if err != nil { + return handleBootTimeFileReadErr(err) + } + if len(lines) != 1 { + return 0, fmt.Errorf("wrong uptime format") + } + f := strings.Fields(lines[0]) + b, err := strconv.ParseFloat(f[0], 64) + if err != nil { + return 0, err + } + currentTime := float64(time.Now().UnixNano()) / float64(time.Second) + t := currentTime - b + return uint64(t), nil +} + +func handleBootTimeFileReadErr(err error) (uint64, error) { if os.IsPermission(err) { var info syscall.Sysinfo_t err := syscall.Sysinfo(&info) @@ -84,39 +105,27 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { t := currentTime - int64(info.Uptime) return uint64(t), nil } + return 0, err +} + +func readBootTimeStat(ctx context.Context) (uint64, error) { + filename := HostProcWithContext(ctx, "stat") + line, err := ReadLine(filename, "btime") if err != nil { - return 0, err + return handleBootTimeFileReadErr(err) } - - if statFile == "stat" { - for _, line := range lines { - if strings.HasPrefix(line, "btime") { - f := strings.Fields(line) - if len(f) != 2 { - return 0, fmt.Errorf("wrong btime format") - } - b, err := strconv.ParseInt(f[1], 10, 64) - if err != nil { - return 0, err - } - t := uint64(b) - return t, nil - } - } - } else if statFile == "uptime" { - if len(lines) != 1 { - return 0, fmt.Errorf("wrong uptime format") + if strings.HasPrefix(line, "btime") { + f := strings.Fields(line) + if len(f) != 2 { + return 0, fmt.Errorf("wrong btime format") } - f := strings.Fields(lines[0]) - b, err := strconv.ParseFloat(f[0], 64) + b, err := strconv.ParseInt(f[1], 10, 64) if err != nil { return 0, err } - currentTime := float64(time.Now().UnixNano()) / float64(time.Second) - t := currentTime - b - return uint64(t), nil + t := uint64(b) + return t, nil } - return 0, fmt.Errorf("could not find btime") } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go new file mode 100644 index 000000000..efbc710a5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go @@ -0,0 +1,66 @@ +//go:build netbsd +// +build netbsd + +package common + +import ( + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrl(mib string) ([]string, error) { + cmd := exec.Command("sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + 
uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem.go index ff960dacc..edaf268bb 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem.go @@ -50,6 +50,7 @@ type VirtualMemoryStat struct { // https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-proc-meminfo.html // https://www.kernel.org/doc/Documentation/filesystems/proc.txt // https://www.kernel.org/doc/Documentation/vm/overcommit-accounting + // https://www.kernel.org/doc/Documentation/vm/transhuge.txt Buffers uint64 `json:"buffers"` Cached uint64 `json:"cached"` WriteBack uint64 `json:"writeBack"` @@ -78,6 +79,7 @@ type VirtualMemoryStat struct { HugePagesRsvd uint64 `json:"hugePagesRsvd"` HugePagesSurp uint64 `json:"hugePagesSurp"` HugePageSize uint64 `json:"hugePageSize"` + AnonHugePages uint64 `json:"anonHugePages"` } type SwapMemoryStat struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go index ce930fbe4..ef867d742 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go @@ -1,5 +1,5 @@ -//go:build freebsd || openbsd -// +build freebsd openbsd +//go:build freebsd || openbsd || netbsd +// +build freebsd openbsd netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go index 0b6c528f2..697fd8709 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go @@ -1,5 +1,5 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix +//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix && !netbsd +// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix,!netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go index 361d06bcc..214a91e47 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go @@ -311,6 +311,12 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu return ret, retEx, err } ret.HugePageSize = t * 1024 + case "AnonHugePages": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.AnonHugePages = t * 1024 } } @@ -371,25 +377,25 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { continue } ret.Sout = value * 4 * 1024 - case "pgpgIn": + case "pgpgin": value, err := strconv.ParseUint(fields[1], 10, 64) if err != nil { continue } ret.PgIn = value * 4 * 1024 - case "pgpgOut": + case "pgpgout": value, err := strconv.ParseUint(fields[1], 10, 64) if err != nil { continue } ret.PgOut = value * 4 * 1024 - case "pgFault": + case "pgfault": value, err := strconv.ParseUint(fields[1], 10, 64) if err != nil { continue } ret.PgFault = value * 4 * 1024 - case "pgMajFault": + case "pgmajfault": value, err := strconv.ParseUint(fields[1], 10, 64) if err != nil { continue diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go 
b/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go new file mode 100644 index 000000000..d1f54ecaf --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go @@ -0,0 +1,87 @@ +//go:build netbsd +// +build netbsd + +package mem + +import ( + "context" + "errors" + "fmt" + + "golang.org/x/sys/unix" +) + +func GetPageSize() (uint64, error) { + return GetPageSizeWithContext(context.Background()) +} + +func GetPageSizeWithContext(ctx context.Context) (uint64, error) { + uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") + if err != nil { + return 0, err + } + return uint64(uvmexp.Pagesize), nil +} + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") + if err != nil { + return nil, err + } + p := uint64(uvmexp.Pagesize) + + ret := &VirtualMemoryStat{ + Total: uint64(uvmexp.Npages) * p, + Free: uint64(uvmexp.Free) * p, + Active: uint64(uvmexp.Active) * p, + Inactive: uint64(uvmexp.Inactive) * p, + Cached: 0, // not available + Wired: uint64(uvmexp.Wired) * p, + } + + ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Used = ret.Total - ret.Available + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + // Get buffers from vm.bufmem sysctl + ret.Buffers, err = unix.SysctlUint64("vm.bufmem") + if err != nil { + return nil, err + } + + return ret, nil +} + +// Return swapctl summary info +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + out, err := invoke.CommandWithContext(ctx, "swapctl", "-sk") + if err != nil { + return &SwapMemoryStat{}, nil + } + + line := string(out) + var total, used, free uint64 + + _, err = fmt.Sscanf(line, + "total: %d 1K-blocks allocated, %d used, %d available", + &total, &used, &free) + if err != nil { + return nil, errors.New("failed to parse swapctl output") + } + + percent := float64(used) / float64(total) * 100 + return &SwapMemoryStat{ + Total: total * 1024, + Used: used * 1024, + Free: free * 1024, + UsedPercent: percent, + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go index de0ea7345..6e8ce67fb 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "os" "strconv" @@ -643,7 +642,7 @@ func (p *process) getUids(ctx context.Context) ([]int32, error) { func (p *process) fillFromStatus(ctx context.Context) error { pid := p.Pid statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "status") - contents, err := ioutil.ReadFile(statPath) + contents, err := os.ReadFile(statPath) if err != nil { return err } @@ -784,7 +783,7 @@ func processInetWithContext(ctx context.Context, file string, kind netConnection // This minimizes duplicates in the returned connections // For more info: // https://github.com/shirou/gopsutil/pull/361 - contents, err := ioutil.ReadFile(file) + contents, err := os.ReadFile(file) if err != nil { return nil, err } @@ -845,7 +844,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in // This minimizes duplicates in the returned connections // For more info: // https://github.com/shirou/gopsutil/pull/361 - contents, err := 
ioutil.ReadFile(file) + contents, err := os.ReadFile(file) if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go index 37cb7ca44..f7989cd21 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -136,7 +135,7 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details pid := p.Pid statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "stat") - contents, err := ioutil.ReadFile(statPath) + contents, err := os.ReadFile(statPath) if err != nil { return false, err } @@ -391,7 +390,7 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M smapsPath = smapsRollupPath } } - contents, err := ioutil.ReadFile(smapsPath) + contents, err := os.ReadFile(smapsPath) if err != nil { return nil, err } @@ -484,7 +483,7 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { environPath := common.HostProcWithContext(ctx, strconv.Itoa(int(p.Pid)), "environ") - environContent, err := ioutil.ReadFile(environPath) + environContent, err := os.ReadFile(environPath) if err != nil { return nil, err } @@ -668,7 +667,7 @@ func (p *Process) fillFromExeWithContext(ctx context.Context) (string, error) { func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) { pid := p.Pid cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") - cmdline, err := ioutil.ReadFile(cmdPath) + cmdline, err := os.ReadFile(cmdPath) if err != nil { return "", err } @@ -682,7 +681,7 @@ func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) { pid := p.Pid cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") - cmdline, err := ioutil.ReadFile(cmdPath) + cmdline, err := os.ReadFile(cmdPath) if err != nil { return nil, err } @@ -705,7 +704,7 @@ func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, error) { pid := p.Pid ioPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "io") - ioline, err := ioutil.ReadFile(ioPath) + ioline, err := os.ReadFile(ioPath) if err != nil { return nil, err } @@ -741,7 +740,7 @@ func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, e func (p *Process) fillFromStatmWithContext(ctx context.Context) (*MemoryInfoStat, *MemoryInfoExStat, error) { pid := p.Pid memPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "statm") - contents, err := ioutil.ReadFile(memPath) + contents, err := os.ReadFile(memPath) if err != nil { return nil, nil, err } @@ -802,7 +801,7 @@ func (p *Process) fillNameWithContext(ctx context.Context) error { func (p *Process) fillFromCommWithContext(ctx context.Context) error { pid := p.Pid statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "comm") - contents, err := ioutil.ReadFile(statPath) + contents, err := os.ReadFile(statPath) if err != nil { return err } @@ -819,7 +818,7 @@ func 
(p *Process) fillFromStatus() error { func (p *Process) fillFromStatusWithContext(ctx context.Context) error { pid := p.Pid statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "status") - contents, err := ioutil.ReadFile(statPath) + contents, err := os.ReadFile(statPath) if err != nil { return err } @@ -1026,7 +1025,7 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui statPath = common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "task", strconv.Itoa(int(tid)), "stat") } - contents, err := ioutil.ReadFile(statPath) + contents, err := os.ReadFile(statPath) if err != nil { return 0, 0, nil, 0, 0, 0, nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go b/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go index ad1c3cfc1..dd4bd4760 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go @@ -3,7 +3,6 @@ package process import ( "bytes" "context" - "io/ioutil" "os" "strconv" "strings" @@ -232,7 +231,7 @@ func (p *Process) fillFromPathAOutWithContext(ctx context.Context) (string, erro func (p *Process) fillFromExecnameWithContext(ctx context.Context) (string, error) { pid := p.Pid execNamePath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "execname") - exe, err := ioutil.ReadFile(execNamePath) + exe, err := os.ReadFile(execNamePath) if err != nil { return "", err } @@ -242,7 +241,7 @@ func (p *Process) fillFromExecnameWithContext(ctx context.Context) (string, erro func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) { pid := p.Pid cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") - cmdline, err := ioutil.ReadFile(cmdPath) + cmdline, err := os.ReadFile(cmdPath) if err != nil { return "", err } @@ -259,7 +258,7 @@ func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) { pid := p.Pid cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") - cmdline, err := ioutil.ReadFile(cmdPath) + cmdline, err := os.ReadFile(cmdPath) if err != nil { return nil, err } diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md index da2bad447..96b2e4dc3 100644 --- a/vendor/github.com/tidwall/gjson/README.md +++ b/vendor/github.com/tidwall/gjson/README.md @@ -427,16 +427,6 @@ if result.Index > 0 { This is a best-effort no allocation sub slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`. -## Get multiple values at once - -The `GetMany` function can be used to get multiple values at the same time. - -```go -results := gjson.GetMany(json, "name.first", "name.last", "age") -``` - -The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths. 
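The `io/ioutil` removals in this and the surrounding files follow the standard deprecation mapping (Go 1.16+): `ioutil.ReadFile` becomes `os.ReadFile`, `ioutil.ReadAll` becomes `io.ReadAll`, and `ioutil.ReadDir` becomes `os.ReadDir`. Only the last changes a type, returning `[]os.DirEntry` instead of `[]fs.FileInfo`, which is safe in `numProcs` above because only `len(dirs)` is used. A compact sketch:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	// os.ReadFile replaces ioutil.ReadFile verbatim.
	data, err := os.ReadFile("/etc/hosts")
	if err != nil {
		fmt.Println(err)
		return
	}

	// io.ReadAll replaces ioutil.ReadAll verbatim.
	f, err := os.Open("/etc/hosts")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	rest, _ := io.ReadAll(f)

	// os.ReadDir replaces ioutil.ReadDir but returns []os.DirEntry;
	// code that only counts entries (like numProcs) is unaffected.
	dirs, _ := os.ReadDir("/proc")
	fmt.Println(len(data) == len(rest), len(dirs))
}
```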
- ## Performance Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/), diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go index a1633be52..79498250a 100644 --- a/vendor/github.com/tidwall/gjson/gjson.go +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -3410,7 +3410,7 @@ func (t Result) Path(json string) string { if !rcomp.Exists() { goto fail } - comp := escapeComp(rcomp.String()) + comp := Escape(rcomp.String()) path = append(path, '.') path = append(path, comp...) } @@ -3425,17 +3425,31 @@ fail: // isSafePathKeyChar returns true if the input character is safe for not // needing escaping. func isSafePathKeyChar(c byte) bool { - return c <= ' ' || c > '~' || c == '_' || c == '-' || c == ':' || - (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || - (c >= '0' && c <= '9') + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || c <= ' ' || c > '~' || c == '_' || + c == '-' || c == ':' } -// escapeComp escaped a path compontent, making it safe for generating a -// path for later use. -func escapeComp(comp string) string { +// Escape returns an escaped path component. +// +// json := `{ +// "user":{ +// "first.name": "Janet", +// "last.name": "Prichard" +// } +// }` +// user := gjson.Get(json, "user") +// println(user.Get(gjson.Escape("first.name"))) +// println(user.Get(gjson.Escape("last.name"))) +// // Output: +// // Janet +// // Prichard +func Escape(comp string) string { for i := 0; i < len(comp); i++ { if !isSafePathKeyChar(comp[i]) { - ncomp := []byte(comp[:i]) + ncomp := make([]byte, len(comp)+1) + copy(ncomp, comp[:i]) + ncomp = ncomp[:i] for ; i < len(comp); i++ { if !isSafePathKeyChar(comp[i]) { ncomp = append(ncomp, '\\') diff --git a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml index 6be2c3548..1b27f1962 100644 --- a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml +++ b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml @@ -1,6 +1,6 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.19.1 + GO_VERSION: go1.20 freebsd_12_task: freebsd_instance: diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go index 4a5197b2f..3f5d83f69 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go @@ -5,6 +5,10 @@ package sysconf import ( + "strconv" + "strings" + "sync" + "golang.org/x/sys/unix" ) @@ -14,8 +18,14 @@ const ( _SYMLOOP_MAX = _MAXSYMLINKS ) -// sysconf implements sysconf(3) as in the Darwin libc, version 1244.30.3 -// (derived from the FreeBSD libc). +var uname struct { + sync.Once + macOSMajor int +} + +// sysconf implements sysconf(3) as in the Darwin libc (derived from the FreeBSD +// libc), version 1534.81.1. +// See https://github.com/apple-oss-distributions/Libc/tree/Libc-1534.81.1.
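The newly exported `gjson.Escape` above makes keys that contain path syntax addressable. A usage sketch against the real gjson API:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	json := `{"user":{"first.name":"Janet","last.name":"Prichard"}}`

	// Without escaping, the dot is treated as a path separator
	// (first -> name) and the lookup fails; Escape makes the literal
	// key addressable.
	user := gjson.Get(json, "user")
	fmt.Println(user.Get("first.name").Exists())               // false
	fmt.Println(user.Get(gjson.Escape("first.name")).String()) // Janet
}
```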
func sysconf(name int) (int64, error) { switch name { case SC_AIO_LISTIO_MAX: @@ -54,12 +64,16 @@ func sysconf(name int) (int64, error) { return sysctl32("kern.ngroups"), nil case SC_OPEN_MAX, SC_STREAM_MAX: var rlim unix.Rlimit - if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { - if rlim.Cur != unix.RLIM_INFINITY { - return int64(rlim.Cur), nil - } + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { + return -1, nil } - return -1, nil + if rlim.Cur > unix.RLIM_INFINITY { + return -1, nil + } + if rlim.Cur > _LONG_MAX { + return -1, unix.EOVERFLOW + } + return int64(rlim.Cur), nil case SC_RTSIG_MAX: return -1, nil case SC_SEM_NSEMS_MAX: @@ -126,7 +140,22 @@ func sysconf(name int) (int64, error) { } return _POSIX_SEMAPHORES, nil case SC_SPAWN: - return _POSIX_SPAWN, nil + uname.Once.Do(func() { + var u unix.Utsname + err := unix.Uname(&u) + if err != nil { + return + } + rel := unix.ByteSliceToString(u.Release[:]) + ver := strings.Split(rel, ".") + maj, _ := strconv.Atoi(ver[0]) + uname.macOSMajor = maj + }) + if uname.macOSMajor < 22 { + return -1, nil + } + // macOS 13 (Ventura) and later + return 200112, nil case SC_SPIN_LOCKS: return _POSIX_SPIN_LOCKS, nil case SC_SPORADIC_SERVER: diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go index 6fa7fde8a..6fadf3db1 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go @@ -181,7 +181,6 @@ const ( _POSIX_SHARED_MEMORY_OBJECTS = -0x1 _POSIX_SHELL = 0x30db0 _POSIX_SIGQUEUE_MAX = 0x20 - _POSIX_SPAWN = -0x1 _POSIX_SPIN_LOCKS = -0x1 _POSIX_SPORADIC_SERVER = -0x1 _POSIX_SS_REPL_MAX = 0x4 @@ -248,7 +247,8 @@ const ( const ( _CHAR_BIT = 0x8 - _INT_MAX = 0x7fffffff + _INT_MAX = 0x7fffffff + _LONG_MAX = 0x7fffffffffffffff sizeofOffT = 0x8 ) diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go new file mode 100644 index 000000000..ecc0dabb7 --- /dev/null +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package maps defines various functions useful with maps of any type. +package maps + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} + +// Equal reports whether two maps contain the same key/value pairs. +// Values are compared using ==. +func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// EqualFunc is like Equal, but compares values using eq. +// Keys are still compared with ==. 
+func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || !eq(v1, v2) { + return false + } + } + return true +} + +// Clear removes all entries from m, leaving it empty. +func Clear[M ~map[K]V, K comparable, V any](m M) { + for k := range m { + delete(m, k) + } +} + +// Clone returns a copy of m. This is a shallow clone: +// the new keys and values are set using ordinary assignment. +func Clone[M ~map[K]V, K comparable, V any](m M) M { + // Preserve nil in case it matters. + if m == nil { + return nil + } + r := make(M, len(m)) + for k, v := range m { + r[k] = v + } + return r +} + +// Copy copies all key/value pairs in src adding them to dst. +// When a key in src is already present in dst, +// the value in dst will be overwritten by the value associated +// with the key in src. +func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { + for k, v := range src { + dst[k] = v + } +} + +// DeleteFunc deletes any key/value pairs from m for which del returns true. +func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { + for k, v := range m { + if del(k, v) { + delete(m, k) + } + } +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 8f775fafa..47fa6a7eb 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -583,6 +583,7 @@ ccflags="$@" $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SEEK_/ || + $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /IOC_MAGIC/ && diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index a730878e4..0ba030197 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2471,6 +2471,29 @@ func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask * return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) } +//sys schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) +//sys schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) + +// SchedSetAttr is a wrapper for sched_setattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_setattr.2.html +func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error { + if attr == nil { + return EINVAL + } + attr.Size = SizeofSchedAttr + return schedSetattr(pid, attr, flags) +} + +// SchedGetAttr is a wrapper for sched_getattr(2) syscall. 
+// https://man7.org/linux/man-pages/man2/sched_getattr.2.html +func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { + attr := &SchedAttr{} + if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil { + return nil, err + } + return attr, nil +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8bb30e7ce..f6eda2705 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -549,6 +549,9 @@ func SetNonblock(fd int, nonblocking bool) (err error) { if err != nil { return err } + if (flag&O_NONBLOCK != 0) == nonblocking { + return nil + } if nonblocking { flag |= O_NONBLOCK } else { diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 3784f402e..0787a043b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2821,6 +2821,23 @@ const ( RWF_SUPPORTED = 0x1f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 + SCHED_BATCH = 0x3 + SCHED_DEADLINE = 0x6 + SCHED_FIFO = 0x1 + SCHED_FLAG_ALL = 0x7f + SCHED_FLAG_DL_OVERRUN = 0x4 + SCHED_FLAG_KEEP_ALL = 0x18 + SCHED_FLAG_KEEP_PARAMS = 0x10 + SCHED_FLAG_KEEP_POLICY = 0x8 + SCHED_FLAG_RECLAIM = 0x2 + SCHED_FLAG_RESET_ON_FORK = 0x1 + SCHED_FLAG_UTIL_CLAMP = 0x60 + SCHED_FLAG_UTIL_CLAMP_MAX = 0x40 + SCHED_FLAG_UTIL_CLAMP_MIN = 0x20 + SCHED_IDLE = 0x5 + SCHED_NORMAL = 0x0 + SCHED_RESET_ON_FORK = 0x40000000 + SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index a07321bed..14ab34a56 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2197,3 +2197,23 @@ func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) { + _, _, e1 := Syscall(SYS_SCHED_SETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_SCHED_GETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(size), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 26ef52aaf..494493c78 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -5868,3 +5868,18 @@ const ( VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5 VIRTIO_NET_HDR_GSO_ECN = 0x80 ) + +type SchedAttr struct { + Size uint32 + Policy uint32 + Flags uint64 + Nice int32 + Priority uint32 + Runtime uint64 + Deadline uint64 + Period uint64 + Util_min uint32 + Util_max uint32 +} + +const SizeofSchedAttr = 0x38 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 373d16388..67bad0926 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -216,7 +216,7 @@ func 
NewCallbackCDecl(fn interface{}) uintptr { //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -437,6 +437,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute //sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute +// Windows Multimedia API +//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod +//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. @@ -1624,6 +1628,11 @@ func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } +func GetStartupInfo(startupInfo *StartupInfo) error { + getStartupInfo(startupInfo) + return nil +} + func (s NTStatus) Errno() syscall.Errno { return rtlNtStatusToDosErrorNoTeb(s) } diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 566dd3e31..5c385580f 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -55,6 +55,7 @@ var ( moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") modversion = NewLazySystemDLL("version.dll") + modwinmm = NewLazySystemDLL("winmm.dll") modwintrust = NewLazySystemDLL("wintrust.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") @@ -468,6 +469,8 @@ var ( procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") procVerQueryValueW = modversion.NewProc("VerQueryValueW") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + proctimeEndPeriod = modwinmm.NewProc("timeEndPeriod") procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") @@ -2367,11 +2370,8 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getStartupInfo(startupInfo *StartupInfo) { + syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) return } @@ -4017,6 +4017,22 @@ 
func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint return } +func TimeBeginPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func TimeEndPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { diff --git a/vendor/golang.org/x/text/unicode/norm/trie.go b/vendor/golang.org/x/text/unicode/norm/trie.go index 423386bf4..e4250ae22 100644 --- a/vendor/golang.org/x/text/unicode/norm/trie.go +++ b/vendor/golang.org/x/text/unicode/norm/trie.go @@ -29,7 +29,7 @@ var ( nfkcData = newNfkcTrie(0) ) -// lookupValue determines the type of block n and looks up the value for b. +// lookup determines the type of block n and looks up the value for b. // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block // is a list of ranges with an accompanying value. Given a matching range r, // the value for b is by r.value + (b - r.lo) * stride. diff --git a/vendor/modules.txt b/vendor/modules.txt index bf99fe05a..ce202e218 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,7 +4,7 @@ dario.cat/mergo # github.com/bbrks/wrap/v2 v2.5.0 ## explicit; go 1.13 github.com/bbrks/wrap/v2 -# github.com/go-ole/go-ole v1.2.6 +# github.com/go-ole/go-ole v1.3.0 ## explicit; go 1.12 github.com/go-ole/go-ole github.com/go-ole/go-ole/oleutil @@ -44,7 +44,7 @@ github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.19 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/mattn/go-runewidth v0.0.14 +# github.com/mattn/go-runewidth v0.0.15 ## explicit; go 1.9 github.com/mattn/go-runewidth # github.com/olekukonko/tablewriter v0.0.5 @@ -58,13 +58,13 @@ github.com/power-devops/perfstat github.com/rivo/uniseg # github.com/rogpeppe/go-internal v1.11.0 ## explicit; go 1.19 -# github.com/rs/zerolog v1.30.0 +# github.com/rs/zerolog v1.31.0 ## explicit; go 1.15 github.com/rs/zerolog github.com/rs/zerolog/internal/cbor github.com/rs/zerolog/internal/json github.com/rs/zerolog/log -# github.com/shirou/gopsutil/v3 v3.23.7 +# github.com/shirou/gopsutil/v3 v3.23.9 ## explicit; go 1.15 github.com/shirou/gopsutil/v3/common github.com/shirou/gopsutil/v3/cpu @@ -82,7 +82,7 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/tidwall/gjson v1.16.0 +# github.com/tidwall/gjson v1.17.0 ## explicit; go 1.12 github.com/tidwall/gjson # github.com/tidwall/match v1.1.1 @@ -94,7 +94,7 @@ github.com/tidwall/pretty # github.com/tidwall/sjson v1.2.5 ## explicit; go 1.14 github.com/tidwall/sjson -# github.com/tklauser/go-sysconf v0.3.11 +# github.com/tklauser/go-sysconf v0.3.12 ## explicit; go 1.13 github.com/tklauser/go-sysconf # github.com/tklauser/numcpus v0.6.1 @@ -106,16 +106,19 @@ github.com/yusufpapurcu/wmi # github.com/zekroTJA/timedmap v1.5.1 ## explicit; go 1.13 github.com/zekroTJA/timedmap -# golang.org/x/sys v0.11.0 +# golang.org/x/exp v0.0.0-20230905200255-921286631fa9 +## explicit; go 1.20 +golang.org/x/exp/maps +# golang.org/x/sys v0.12.0 ## explicit; go 1.17 golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix 
golang.org/x/sys/windows -# golang.org/x/term v0.11.0 +# golang.org/x/term v0.12.0 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.12.0 +# golang.org/x/text v0.13.0 ## explicit; go 1.17 golang.org/x/text/cases golang.org/x/text/internal
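The one entirely new module in this vendor refresh is `golang.org/x/exp/maps`, whose source is vendored above. A sketch of its generic helpers; the counter map here is invented for illustration and is not taken from the Harvest code:

```go
package main

import (
	"fmt"
	"sort"

	"golang.org/x/exp/maps"
)

func main() {
	counters := map[string]int{"read_ops": 10, "write_ops": 4} // invented data

	keys := maps.Keys(counters) // key order is indeterminate
	sort.Strings(keys)          // sort for deterministic output
	fmt.Println(keys)           // [read_ops write_ops]

	clone := maps.Clone(counters)            // shallow copy
	fmt.Println(maps.Equal(counters, clone)) // true

	// Drop entries whose value is below a threshold.
	maps.DeleteFunc(clone, func(k string, v int) bool { return v < 5 })
	fmt.Println(maps.Keys(clone)) // [read_ops]
}
```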