//go:build integration

package collector

import (
	"context"
	"testing"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"go.mongodb.org/mongo-driver/v2/bson"

	"github.com/ppiankov/mongopulse/internal/config"
	"github.com/ppiankov/mongopulse/internal/metrics"
	"github.com/ppiankov/mongopulse/internal/testutil"
)

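// These tests exercise the collector against a live MongoDB instance
// provisioned by testutil.StartMongo, so they are compiled only under the
// integration build tag, e.g.:
//
//	go test -tags integration ./...
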
// gaugeValue reads the current value of one labeled child of a GaugeVec by
// serializing it into a dto.Metric; it returns 0 if the value cannot be read.
func gaugeValue(g *prometheus.GaugeVec, labels ...string) float64 {
	m := &dto.Metric{}
	if err := g.WithLabelValues(labels...).Write(m); err != nil {
		return 0
	}
	return m.GetGauge().GetValue()
}

// counterValue is the CounterVec counterpart of gaugeValue.
func counterValue(c *prometheus.CounterVec, labels ...string) float64 {
	m := &dto.Metric{}
	if err := c.WithLabelValues(labels...).Write(m); err != nil {
		return 0
	}
	return m.GetCounter().GetValue()
}

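// TestGaugeValueHelper sanity-checks the gaugeValue test helper itself with a
// throwaway, unregistered GaugeVec; it does not need a running MongoDB.
func TestGaugeValueHelper(t *testing.T) {
	t.Parallel()
	g := prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: "helper_check_gauge", Help: "helper check"}, []string{"node"})
	g.WithLabelValues("n1").Set(42)
	if got := gaugeValue(g, "n1"); got != 42 {
		t.Errorf("gaugeValue = %f, want 42", got)
	}
	// A label combination that was never set reads back as zero.
	if got := gaugeValue(g, "other"); got != 0 {
		t.Errorf("gaugeValue(unset) = %f, want 0", got)
	}
}
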
// setupCollector starts a MongoDB instance via testutil, registers a fresh
// metrics set, and returns the collector, its metrics, and the node label
// used when looking up metric values.
func setupCollector(t *testing.T) (*Collector, *metrics.Metrics, string) {
	t.Helper()
	client, _ := testutil.StartMongo(t)

	reg := prometheus.NewRegistry()
	m := metrics.New(reg)
	cfg := config.Config{
		DSN:                 []string{"mongodb://localhost:27017"},
		PollInterval:        5 * time.Second,
		SlowQueryThreshold:  5 * time.Second,
		RegressionThreshold: 2.0,
		StmtLimit:           50,
	}
	node := "test-node"
	c := New(client, node, m, cfg, nil, nil)
	return c, m, node
}

func TestCollectServerStatus(t *testing.T) {
	c, m, node := setupCollector(t)
	ctx := context.Background()

	ss, err := c.collectServerStatus(ctx)
	if err != nil {
		t.Fatalf("collectServerStatus: %v", err)
	}
	if ss == nil {
		t.Fatal("serverStatus result is nil")
	}

	up := gaugeValue(m.Up, node)
	if up != 1 {
		t.Errorf("Up = %f, want 1", up)
	}

	uptime := gaugeValue(m.Uptime, node)
	if uptime <= 0 {
		t.Errorf("Uptime = %f, want > 0", uptime)
	}

	// The serverStatus document should report a non-empty server version.
	if v, ok := ss["version"].(string); !ok || v == "" {
		t.Error("serverStatus missing version field")
	}
}

func TestCollectConnections(t *testing.T) {
	c, m, node := setupCollector(t)
	ctx := context.Background()

	ss, err := c.collectServerStatus(ctx)
	if err != nil {
		t.Fatalf("collectServerStatus: %v", err)
	}

	c.collectConnections(ctx, ss)

	current := gaugeValue(m.ConnCurrent, node)
	if current <= 0 {
		t.Errorf("ConnCurrent = %f, want > 0", current)
	}

	available := gaugeValue(m.ConnAvailable, node)
	if available <= 0 {
		t.Errorf("ConnAvailable = %f, want > 0", available)
	}
}

func TestCollectOpcounters(t *testing.T) {
	c, m, node := setupCollector(t)
	ctx := context.Background()

	// Get initial opcounters.
	ss1, err := c.collectServerStatus(ctx)
	if err != nil {
		t.Fatalf("collectServerStatus: %v", err)
	}
	c.collectOpcounters(ctx, ss1)
	insertBefore := counterValue(m.OpsTotal, node, "insert")

	// Insert a document to increment the insert counter.
	_, err = c.client.Database("testdb_opcounters").Collection("testcoll").InsertOne(ctx, bson.M{"key": "value"})
	if err != nil {
		t.Fatalf("insert: %v", err)
	}

	// Re-collect.
	ss2, err := c.collectServerStatus(ctx)
	if err != nil {
		t.Fatalf("collectServerStatus: %v", err)
	}
	c.collectOpcounters(ctx, ss2)
	insertAfter := counterValue(m.OpsTotal, node, "insert")

	if insertAfter <= insertBefore {
		t.Errorf("insert counter did not increase: before=%f, after=%f", insertBefore, insertAfter)
	}
}

func TestCollectDbStats(t *testing.T) {
	c, m, node := setupCollector(t)
	ctx := context.Background()

	// Insert data so dbStats has something to report.
	db := c.client.Database("testdb_dbstats")
	_, err := db.Collection("testcoll").InsertOne(ctx, bson.M{"data": "hello world"})
	if err != nil {
		t.Fatalf("insert: %v", err)
	}

	c.collectDbStats(ctx)

	dataSize := gaugeValue(m.DbDataSize, node, "testdb_dbstats")
	if dataSize <= 0 {
		t.Errorf("DbDataSize = %f, want > 0", dataSize)
	}
}

func TestCollectCollections(t *testing.T) {
	c, m, node := setupCollector(t)
	ctx := context.Background()

	db := c.client.Database("testdb_collections")
	_, err := db.Collection("mycoll").InsertOne(ctx, bson.M{"n": 1})
	if err != nil {
		t.Fatalf("insert: %v", err)
	}

	c.collectCollections(ctx)

	docCount := gaugeValue(m.CollDocCount, node, "testdb_collections", "mycoll")
	if docCount < 1 {
		t.Errorf("CollDocCount = %f, want >= 1", docCount)
	}
}

func TestCollect_FullFlow(t *testing.T) {
	c, m, node := setupCollector(t)
	ctx := context.Background()

	// Insert some data to make metrics non-trivial.
	db := c.client.Database("testdb_fullflow")
	_, err := db.Collection("items").InsertOne(ctx, bson.M{"item": "test"})
	if err != nil {
		t.Fatalf("insert: %v", err)
	}

	// Run the full collection; it should not panic or error.
	c.Collect(ctx)

	// Verify key metrics are set.
	up := gaugeValue(m.Up, node)
	if up != 1 {
		t.Errorf("Up = %f, want 1 after Collect()", up)
	}

	uptime := gaugeValue(m.Uptime, node)
	if uptime <= 0 {
		t.Errorf("Uptime = %f, want > 0", uptime)
	}

	pollDuration := gaugeValue(m.PollDuration, node)
	if pollDuration <= 0 {
		t.Errorf("PollDuration = %f, want > 0", pollDuration)
	}
}

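// TestCollect_Repeated runs Collect twice against the same server as a basic
// repeat-poll smoke check; as long as the server stays reachable, Up should
// remain 1 after the second pass.
func TestCollect_Repeated(t *testing.T) {
	c, m, node := setupCollector(t)
	ctx := context.Background()

	c.Collect(ctx)
	c.Collect(ctx)

	if up := gaugeValue(m.Up, node); up != 1 {
		t.Errorf("Up = %f, want 1 after repeated Collect()", up)
	}
}
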
func TestCollect_ReplicationGracefulSkip(t *testing.T) {
	c, _, _ := setupCollector(t)
	ctx := context.Background()

	// Against a standalone MongoDB there is no replica set, so collectReplication
	// is expected to return an error. The important property is that it fails
	// gracefully instead of panicking or breaking the rest of Collect.
	err := c.collectReplication(ctx)
	if err == nil {
		t.Log("collectReplication returned nil; unexpected for a standalone server, but not fatal")
	}
}

func TestToFloat64(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		in   interface{}
		want float64
		ok   bool
	}{
		{"float64", float64(3.14), 3.14, true},
		{"int32", int32(42), 42, true},
		{"int64", int64(100), 100, true},
		{"int", int(7), 7, true},
		{"string", "nope", 0, false},
		{"nil", nil, 0, false},
		{"bool", true, 0, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, ok := toFloat64(tt.in)
			if ok != tt.ok {
				t.Errorf("ok = %v, want %v", ok, tt.ok)
			}
			if got != tt.want {
				t.Errorf("value = %f, want %f", got, tt.want)
			}
		})
	}
}

func TestIsSystemDB(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name string
		want bool
	}{
		{"admin", true},
		{"local", true},
		{"config", true},
		{"mydb", false},
		{"", false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			if got := isSystemDB(tt.name); got != tt.want {
				t.Errorf("isSystemDB(%q) = %v, want %v", tt.name, got, tt.want)
			}
		})
	}
}