Skip to content

Commit 9ff1a7b

Browse files
author
Howard Kan
committed
Add Qos Rate Limit time to Array, Host and Pod
Add the following volume-group information under /metrics and /metrics/volumes: 1. QoS limits; 2. QoS rate-limit time and other latency metrics; 3. bandwidth; 4. I/O size per operation.
1 parent b9389b4 commit 9ff1a7b

18 files changed

+1049
-33
lines changed

internal/openmetrics-exporter/arrays_performance_collector.go

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,24 @@ func (c *ArrayPerformanceCollector) Collect(ch chan<- prometheus.Metric) {
7878
ap.ServiceUsecPerWriteOp,
7979
"service_usec_per_write_op",
8080
)
81+
ch <- prometheus.MustNewConstMetric(
82+
c.LatencyDesc,
83+
prometheus.GaugeValue,
84+
ap.QosRateLimitUsecPerMirroredWriteOp,
85+
"qos_rate_limit_usec_per_mirrored_write_op",
86+
)
87+
ch <- prometheus.MustNewConstMetric(
88+
c.LatencyDesc,
89+
prometheus.GaugeValue,
90+
ap.QosRateLimitUsecPerReadOp,
91+
"qos_rate_limit_usec_per_read_op",
92+
)
93+
ch <- prometheus.MustNewConstMetric(
94+
c.LatencyDesc,
95+
prometheus.GaugeValue,
96+
ap.QosRateLimitUsecPerWriteOp,
97+
"qos_rate_limit_usec_per_write_op",
98+
)
8199
ch <- prometheus.MustNewConstMetric(
82100
c.LatencyDesc,
83101
prometheus.GaugeValue,

internal/openmetrics-exporter/arrays_performance_collector_test.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,9 @@ func TestArrayPerformanceCollector(t *testing.T) {
3737
defer server.Close()
3838
p := arrs.Items[0]
3939
want := make(map[string]bool)
40+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_mirrored_write_op\"} gauge:{value:%g}", p.QosRateLimitUsecPerMirroredWriteOp)] = true
41+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_read_op\"} gauge:{value:%g}", p.QosRateLimitUsecPerReadOp)] = true
42+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_write_op\"} gauge:{value:%g}", p.QosRateLimitUsecPerWriteOp)] = true
4043
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_mirrored_write_op\"} gauge:{value:%g}", p.QueueUsecPerMirroredWriteOp)] = true
4144
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_read_op\"} gauge:{value:%g}", p.QueueUsecPerReadOp)] = true
4245
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_write_op\"} gauge:{value:%g}", p.QueueUsecPerWriteOp)] = true

internal/openmetrics-exporter/collector.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,9 +74,13 @@ func Collector(ctx context.Context, metrics string, registry *prometheus.Registr
7474
vols := faclient.GetVolumes()
7575
volperfcoll := NewVolumesPerformanceCollector(faclient, vols)
7676
volspacecoll := NewVolumesCollector(vols)
77+
vgroupcoll := NewVolumeGroupsCollector(faclient)
78+
vgroupperfcoll := NewVolumeGroupsPerformanceCollector(faclient)
7779
registry.MustRegister(
7880
volperfcoll,
7981
volspacecoll,
82+
vgroupcoll,
83+
vgroupperfcoll,
8084
)
8185
}
8286
return true

internal/openmetrics-exporter/hosts_performance_collector.go

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,24 @@ func (c *HostsPerformanceCollector) Collect(ch chan<- prometheus.Metric) {
102102
float64(hp.ServiceUsecPerReadOpCacheReduction),
103103
hp.Name, "service_usec_per_read_op_cache_reduction",
104104
)
105+
ch <- prometheus.MustNewConstMetric(
106+
c.LatencyDesc,
107+
prometheus.GaugeValue,
108+
float64(hp.QosRateLimitUsecPerMirroredWriteOp),
109+
hp.Name, "qos_rate_limit_usec_per_mirrored_write_op",
110+
)
111+
ch <- prometheus.MustNewConstMetric(
112+
c.LatencyDesc,
113+
prometheus.GaugeValue,
114+
float64(hp.QosRateLimitUsecPerReadOp),
115+
hp.Name, "qos_rate_limit_usec_per_read_op",
116+
)
117+
ch <- prometheus.MustNewConstMetric(
118+
c.LatencyDesc,
119+
prometheus.GaugeValue,
120+
float64(hp.QosRateLimitUsecPerWriteOp),
121+
hp.Name, "qos_rate_limit_usec_per_write_op",
122+
)
105123
ch <- prometheus.MustNewConstMetric(
106124
c.BandwidthDesc,
107125
prometheus.GaugeValue,

internal/openmetrics-exporter/hosts_performance_collector_test.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,9 @@ func TestHostsPerformanceCollector(t *testing.T) {
3636
defer server.Close()
3737
want := make(map[string]bool)
3838
for _, p := range hosts.Items {
39+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_mirrored_write_op\"} label:{name:\"host\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QosRateLimitUsecPerMirroredWriteOp)] = true
40+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_read_op\"} label:{name:\"host\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QosRateLimitUsecPerReadOp)] = true
41+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_write_op\"} label:{name:\"host\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QosRateLimitUsecPerWriteOp)] = true
3942
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_mirrored_write_op\"} label:{name:\"host\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QueueUsecPerMirroredWriteOp)] = true
4043
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_read_op\"} label:{name:\"host\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QueueUsecPerReadOp)] = true
4144
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_write_op\"} label:{name:\"host\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QueueUsecPerWriteOp)] = true

internal/openmetrics-exporter/pods_performance_collector.go

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,24 @@ func (c *PodsPerformanceCollector) Collect(ch chan<- prometheus.Metric) {
7878
hp.ServiceUsecPerWriteOp,
7979
hp.Name, "service_usec_per_write_op",
8080
)
81+
ch <- prometheus.MustNewConstMetric(
82+
c.LatencyDesc,
83+
prometheus.GaugeValue,
84+
hp.QosRateLimitUsecPerMirroredWriteOp,
85+
hp.Name, "qos_rate_limit_usec_per_mirrored_write_op",
86+
)
87+
ch <- prometheus.MustNewConstMetric(
88+
c.LatencyDesc,
89+
prometheus.GaugeValue,
90+
hp.QosRateLimitUsecPerReadOp,
91+
hp.Name, "qos_rate_limit_usec_per_read_op",
92+
)
93+
ch <- prometheus.MustNewConstMetric(
94+
c.LatencyDesc,
95+
prometheus.GaugeValue,
96+
hp.QosRateLimitUsecPerWriteOp,
97+
hp.Name, "qos_rate_limit_usec_per_write_op",
98+
)
8199
ch <- prometheus.MustNewConstMetric(
82100
c.LatencyDesc,
83101
prometheus.GaugeValue,

internal/openmetrics-exporter/pods_performance_collector_test.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,9 @@ func TestPodsPerformanceCollector(t *testing.T) {
3636
defer server.Close()
3737
want := make(map[string]bool)
3838
for _, p := range pods.Items {
39+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_mirrored_write_op\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QosRateLimitUsecPerMirroredWriteOp)] = true
40+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_read_op\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QosRateLimitUsecPerReadOp)] = true
41+
want[fmt.Sprintf("label:{name:\"dimension\" value:\"qos_rate_limit_usec_per_write_op\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QosRateLimitUsecPerWriteOp)] = true
3942
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_mirrored_write_op\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QueueUsecPerMirroredWriteOp)] = true
4043
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_read_op\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QueueUsecPerReadOp)] = true
4144
want[fmt.Sprintf("label:{name:\"dimension\" value:\"queue_usec_per_write_op\"} label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", p.Name, p.QueueUsecPerWriteOp)] = true
Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
package collectors
2+
3+
import (
4+
client "purestorage/fa-openmetrics-exporter/internal/rest-client"
5+
6+
"github.com/prometheus/client_golang/prometheus"
7+
)
8+
9+
type VolumeGroupsCollector struct {
10+
QoSBandwidthLimitDesc *prometheus.Desc
11+
QoSIPOSLimitDesc *prometheus.Desc
12+
Client *client.FAClient
13+
}
14+
15+
func (c *VolumeGroupsCollector) Describe(ch chan<- *prometheus.Desc) {
16+
prometheus.DescribeByCollect(c, ch)
17+
}
18+
19+
func (c *VolumeGroupsCollector) Collect(ch chan<- prometheus.Metric) {
20+
volumeGroups := c.Client.GetVolumeGroups()
21+
if len(volumeGroups.Items) == 0 {
22+
return
23+
}
24+
for _, vg := range volumeGroups.Items {
25+
if vg.QoS.BandwidthLimit != nil {
26+
ch <- prometheus.MustNewConstMetric(
27+
c.QoSBandwidthLimitDesc,
28+
prometheus.GaugeValue,
29+
float64(*vg.QoS.BandwidthLimit),
30+
vg.Name,
31+
)
32+
}
33+
if vg.QoS.IopsLimit != nil {
34+
ch <- prometheus.MustNewConstMetric(
35+
c.QoSIPOSLimitDesc,
36+
prometheus.GaugeValue,
37+
float64(*vg.QoS.IopsLimit),
38+
vg.Name,
39+
)
40+
}
41+
}
42+
}
43+
44+
func NewVolumeGroupsCollector(fa *client.FAClient) *VolumeGroupsCollector {
45+
return &VolumeGroupsCollector{
46+
QoSBandwidthLimitDesc: prometheus.NewDesc(
47+
"purefa_volume_group_qos_bandwidth_bytes_per_sec_limit",
48+
"FlashArray volume group maximum QoS bandwidth limit in bytes per second",
49+
[]string{"name"},
50+
prometheus.Labels{},
51+
),
52+
QoSIPOSLimitDesc: prometheus.NewDesc(
53+
"purefa_volume_group_qos_iops_limit",
54+
"FlashArray volume group QoS IOPs limit",
55+
[]string{"name"},
56+
prometheus.Labels{},
57+
),
58+
Client: fa,
59+
}
60+
}
package collectors

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	client "purestorage/fa-openmetrics-exporter/internal/rest-client"
	"regexp"
	"strings"
	"testing"
)

// TestVolumeGroupsCollector verifies that the volume-groups collector emits
// one QoS gauge per configured limit, using canned REST responses served by
// a local TLS test server.
// (Renamed from the copy-pasted "TestVolumesSpaceCollector"; also fixes the
// undefined loop variable `v` — the range variable is `vg`.)
func TestVolumeGroupsCollector(t *testing.T) {
	res, _ := os.ReadFile("../../test/data/volume_groups.json")
	vers, _ := os.ReadFile("../../test/data/versions.json")
	var volumeGroups client.VolumeGroupsList
	if err := json.Unmarshal(res, &volumeGroups); err != nil {
		t.Fatalf("unmarshalling volume_groups.json fixture: %v", err)
	}
	// Escaped dot: the original `[0-9]+.[0-9]+` would match any character
	// between the two number groups.
	valid := regexp.MustCompile(`^/api/([0-9]+\.[0-9]+)?/volume-groups$`)
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch {
		case r.URL.Path == "/api/api_version":
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusOK)
			w.Write(vers)
		case valid.MatchString(r.URL.Path):
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusOK)
			w.Write(res)
		}
	}))
	defer server.Close()
	endp := strings.Split(server.URL, "/")
	e := endp[len(endp)-1]
	// Expected metric strings, keyed by their protobuf text rendering; only
	// groups with a non-nil limit produce a sample.
	want := make(map[string]bool)
	for _, vg := range volumeGroups.Items {
		if vg.QoS.BandwidthLimit != nil {
			want[fmt.Sprintf("label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", vg.Name, float64(*vg.QoS.BandwidthLimit))] = true
		}
		if vg.QoS.IopsLimit != nil {
			want[fmt.Sprintf("label:{name:\"name\" value:\"%s\"} gauge:{value:%g}", vg.Name, float64(*vg.QoS.IopsLimit))] = true
		}
	}
	c := client.NewRestClient(e, "fake-api-token", "latest", "test-user-agent-string", "test-X-Request-Id-string", false)
	vgc := NewVolumeGroupsCollector(c)
	metricsCheck(t, vgc, want)
}

0 commit comments

Comments
 (0)