diff --git a/tests/unit/handlers/metrics.handlers.test.js b/tests/unit/handlers/metrics.handlers.test.js new file mode 100644 index 0000000..b067538 --- /dev/null +++ b/tests/unit/handlers/metrics.handlers.test.js @@ -0,0 +1,1289 @@ +'use strict' + +const test = require('brittle') +const { + getHashrate, + processHashrateData, + calculateHashrateSummary, + getConsumption, + processConsumptionData, + calculateConsumptionSummary, + getEfficiency, + processEfficiencyData, + calculateEfficiencySummary, + getMinerStatus, + processMinerStatusData, + calculateMinerStatusSummary, + sumObjectValues, + parseEntryTs, + resolveInterval, + getIntervalConfig, + getPowerMode, + processPowerModeData, + calculatePowerModeSummary, + categorizeMiner, + getPowerModeTimeline, + processPowerModeTimelineData, + getTemperature, + processTemperatureData, + calculateTemperatureSummary +} = require('../../../workers/lib/server/handlers/metrics.handlers') + +// ==================== Hashrate Tests ==================== + +test('getHashrate - happy path', async (t) => { + const dayTs = 1700006400000 + const mockCtx = { + conf: { + orks: [{ rpcPublicKey: 'key1' }] + }, + net_r0: { + jRequest: async () => { + return [{ type: 'miner', data: [{ ts: dayTs, val: { hashrate_mhs_5m_sum_aggr: 100000 } }], error: null }] + } + } + } + + const mockReq = { + query: { start: 1700000000000, end: 1700100000000 } + } + + const result = await getHashrate(mockCtx, mockReq) + t.ok(result.log, 'should return log array') + t.ok(result.summary, 'should return summary') + t.ok(Array.isArray(result.log), 'log should be array') + t.ok(result.log.length > 0, 'log should have entries') + t.is(result.log[0].hashrateMhs, 100000, 'should have hashrate value') + t.ok(result.summary.avgHashrateMhs !== null, 'should have avg hashrate') + t.is(result.summary.totalHashrateMhs, 100000, 'should have total hashrate') + t.pass() +}) + +test('getHashrate - missing start throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, 
+ net_r0: { jRequest: async () => ({}) } + } + + try { + await getHashrate(mockCtx, { query: { end: 1700100000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_MISSING_START_END', 'should throw missing start/end error') + } + t.pass() +}) + +test('getHashrate - missing end throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getHashrate(mockCtx, { query: { start: 1700000000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_MISSING_START_END', 'should throw missing start/end error') + } + t.pass() +}) + +test('getHashrate - invalid range throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getHashrate(mockCtx, { query: { start: 1700100000000, end: 1700000000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_INVALID_DATE_RANGE', 'should throw invalid range error') + } + t.pass() +}) + +test('getHashrate - empty ork results', async (t) => { + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, + net_r0: { jRequest: async () => ({}) } + } + + const result = await getHashrate(mockCtx, { query: { start: 1700000000000, end: 1700100000000 } }) + t.ok(result.log, 'should return log array') + t.ok(result.summary, 'should return summary') + t.is(result.log.length, 0, 'log should be empty with no data') + t.is(result.summary.totalHashrateMhs, 0, 'total should be zero') + t.is(result.summary.avgHashrateMhs, null, 'avg should be null') + t.pass() +}) + +test('processHashrateData - processes array data from ORK', (t) => { + const results = [ + [{ type: 'miner', data: [{ ts: 1700006400000, val: { hashrate_mhs_5m_sum_aggr: 100000 } }], error: null }] + ] + + const daily = processHashrateData(results) + t.ok(typeof daily === 'object', 'should return object') + t.ok(Object.keys(daily).length > 0, 'should have entries') + const 
key = Object.keys(daily)[0] + t.is(daily[key], 100000, 'should extract hashrate from val') + t.pass() +}) + +test('processHashrateData - processes object-keyed data', (t) => { + const results = [ + [{ data: { 1700006400000: { hashrate_mhs_5m_sum_aggr: 100000 } } }] + ] + + const daily = processHashrateData(results) + t.ok(typeof daily === 'object', 'should return object') + t.ok(Object.keys(daily).length > 0, 'should have entries') + t.pass() +}) + +test('processHashrateData - handles error results', (t) => { + const results = [{ error: 'timeout' }] + const daily = processHashrateData(results) + t.ok(typeof daily === 'object', 'should return object') + t.is(Object.keys(daily).length, 0, 'should be empty for error results') + t.pass() +}) + +test('processHashrateData - aggregates multiple orks', (t) => { + const results = [ + [{ data: { 1700006400000: { hashrate_mhs_5m_sum_aggr: 50000 } } }], + [{ data: { 1700006400000: { hashrate_mhs_5m_sum_aggr: 30000 } } }] + ] + + const daily = processHashrateData(results) + const key = Object.keys(daily)[0] + t.is(daily[key], 80000, 'should sum hashrate from multiple orks') + t.pass() +}) + +test('calculateHashrateSummary - calculates from log entries', (t) => { + const log = [ + { ts: 1700006400000, hashrateMhs: 100000 }, + { ts: 1700092800000, hashrateMhs: 120000 } + ] + + const summary = calculateHashrateSummary(log) + t.is(summary.totalHashrateMhs, 220000, 'should sum hashrate') + t.is(summary.avgHashrateMhs, 110000, 'should average hashrate') + t.pass() +}) + +test('calculateHashrateSummary - handles empty log', (t) => { + const summary = calculateHashrateSummary([]) + t.is(summary.totalHashrateMhs, 0, 'should be zero') + t.is(summary.avgHashrateMhs, null, 'should be null') + t.pass() +}) + +// ==================== Consumption Tests ==================== + +test('getConsumption - happy path', async (t) => { + const dayTs = 1700006400000 + const mockCtx = { + conf: { + orks: [{ rpcPublicKey: 'key1' }] + }, + net_r0: { + 
jRequest: async () => { + return [{ type: 'powermeter', data: [{ ts: dayTs, val: { site_power_w: 5000000 } }], error: null }] + } + } + } + + const mockReq = { + query: { start: 1700000000000, end: 1700100000000 } + } + + const result = await getConsumption(mockCtx, mockReq) + t.ok(result.log, 'should return log array') + t.ok(result.summary, 'should return summary') + t.ok(Array.isArray(result.log), 'log should be array') + t.ok(result.log.length > 0, 'log should have entries') + t.is(result.log[0].powerW, 5000000, 'should have power value') + t.is(result.log[0].consumptionMWh, (5000000 * 24) / 1000000, 'should convert to MWh') + t.ok(result.summary.avgPowerW !== null, 'should have avg power') + t.ok(result.summary.totalConsumptionMWh > 0, 'should have total consumption') + t.pass() +}) + +test('getConsumption - missing start throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getConsumption(mockCtx, { query: { end: 1700100000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_MISSING_START_END', 'should throw missing start/end error') + } + t.pass() +}) + +test('getConsumption - invalid range throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getConsumption(mockCtx, { query: { start: 1700100000000, end: 1700000000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_INVALID_DATE_RANGE', 'should throw invalid range error') + } + t.pass() +}) + +test('getConsumption - empty ork results', async (t) => { + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, + net_r0: { jRequest: async () => ({}) } + } + + const result = await getConsumption(mockCtx, { query: { start: 1700000000000, end: 1700100000000 } }) + t.ok(result.log, 'should return log array') + t.is(result.log.length, 0, 'log should be empty with no data') + 
t.is(result.summary.totalConsumptionMWh, 0, 'total should be zero') + t.is(result.summary.avgPowerW, null, 'avg should be null') + t.pass() +}) + +test('processConsumptionData - processes array data from ORK', (t) => { + const results = [ + [{ type: 'powermeter', data: [{ ts: 1700006400000, val: { site_power_w: 5000 } }], error: null }] + ] + + const daily = processConsumptionData(results) + t.ok(typeof daily === 'object', 'should return object') + t.ok(Object.keys(daily).length > 0, 'should have entries') + const key = Object.keys(daily)[0] + t.is(daily[key], 5000, 'should extract power from val') + t.pass() +}) + +test('processConsumptionData - processes object-keyed data', (t) => { + const results = [ + [{ data: { 1700006400000: { site_power_w: 5000 } } }] + ] + + const daily = processConsumptionData(results) + t.ok(typeof daily === 'object', 'should return object') + t.ok(Object.keys(daily).length > 0, 'should have entries') + t.pass() +}) + +test('processConsumptionData - handles error results', (t) => { + const results = [{ error: 'timeout' }] + const daily = processConsumptionData(results) + t.ok(typeof daily === 'object', 'should return object') + t.is(Object.keys(daily).length, 0, 'should be empty for error results') + t.pass() +}) + +test('processConsumptionData - aggregates multiple orks', (t) => { + const results = [ + [{ data: { 1700006400000: { site_power_w: 3000 } } }], + [{ data: { 1700006400000: { site_power_w: 2000 } } }] + ] + + const daily = processConsumptionData(results) + const key = Object.keys(daily)[0] + t.is(daily[key], 5000, 'should sum power from multiple orks') + t.pass() +}) + +test('calculateConsumptionSummary - calculates from log entries', (t) => { + const log = [ + { ts: 1700006400000, powerW: 5000000, consumptionMWh: 120 }, + { ts: 1700092800000, powerW: 4000000, consumptionMWh: 96 } + ] + + const summary = calculateConsumptionSummary(log) + t.is(summary.totalConsumptionMWh, 216, 'should sum consumption') + 
t.is(summary.avgPowerW, 4500000, 'should average power') + t.pass() +}) + +test('calculateConsumptionSummary - handles empty log', (t) => { + const summary = calculateConsumptionSummary([]) + t.is(summary.totalConsumptionMWh, 0, 'should be zero') + t.is(summary.avgPowerW, null, 'should be null') + t.pass() +}) + +// ==================== Efficiency Tests ==================== + +test('getEfficiency - happy path', async (t) => { + const dayTs = 1700006400000 + const mockCtx = { + conf: { + orks: [{ rpcPublicKey: 'key1' }] + }, + net_r0: { + jRequest: async () => { + return [{ type: 'miner', data: [{ ts: dayTs, val: { efficiency_w_ths_avg_aggr: 25.5 } }], error: null }] + } + } + } + + const mockReq = { + query: { start: 1700000000000, end: 1700100000000 } + } + + const result = await getEfficiency(mockCtx, mockReq) + t.ok(result.log, 'should return log array') + t.ok(result.summary, 'should return summary') + t.ok(Array.isArray(result.log), 'log should be array') + t.ok(result.log.length > 0, 'log should have entries') + t.is(result.log[0].efficiencyWThs, 25.5, 'should have efficiency value') + t.ok(result.summary.avgEfficiencyWThs !== null, 'should have avg efficiency') + t.pass() +}) + +test('getEfficiency - missing start throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getEfficiency(mockCtx, { query: { end: 1700100000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_MISSING_START_END', 'should throw missing start/end error') + } + t.pass() +}) + +test('getEfficiency - invalid range throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getEfficiency(mockCtx, { query: { start: 1700100000000, end: 1700000000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_INVALID_DATE_RANGE', 'should throw invalid range error') + } + t.pass() +}) + 
+test('getEfficiency - empty ork results', async (t) => { + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, + net_r0: { jRequest: async () => ({}) } + } + + const result = await getEfficiency(mockCtx, { query: { start: 1700000000000, end: 1700100000000 } }) + t.ok(result.log, 'should return log array') + t.is(result.log.length, 0, 'log should be empty with no data') + t.is(result.summary.avgEfficiencyWThs, null, 'avg should be null') + t.pass() +}) + +test('processEfficiencyData - processes array data from ORK', (t) => { + const results = [ + [{ type: 'miner', data: [{ ts: 1700006400000, val: { efficiency_w_ths_avg_aggr: 25.5 } }], error: null }] + ] + + const daily = processEfficiencyData(results) + t.ok(typeof daily === 'object', 'should return object') + t.ok(Object.keys(daily).length > 0, 'should have entries') + const key = Object.keys(daily)[0] + t.is(daily[key].total, 25.5, 'should extract efficiency total') + t.is(daily[key].count, 1, 'should track count') + t.pass() +}) + +test('processEfficiencyData - processes object-keyed data', (t) => { + const results = [ + [{ data: { 1700006400000: { efficiency_w_ths_avg_aggr: 25.5 } } }] + ] + + const daily = processEfficiencyData(results) + t.ok(typeof daily === 'object', 'should return object') + t.ok(Object.keys(daily).length > 0, 'should have entries') + t.pass() +}) + +test('processEfficiencyData - handles error results', (t) => { + const results = [{ error: 'timeout' }] + const daily = processEfficiencyData(results) + t.ok(typeof daily === 'object', 'should return object') + t.is(Object.keys(daily).length, 0, 'should be empty for error results') + t.pass() +}) + +test('processEfficiencyData - averages across multiple orks', (t) => { + const results = [ + [{ data: { 1700006400000: { efficiency_w_ths_avg_aggr: 20 } } }], + [{ data: { 1700006400000: { efficiency_w_ths_avg_aggr: 30 } } }] + ] + + const daily = processEfficiencyData(results) + const key = Object.keys(daily)[0] + 
t.is(daily[key].total, 50, 'should sum efficiency totals') + t.is(daily[key].count, 2, 'should track count from multiple orks') + t.pass() +}) + +test('calculateEfficiencySummary - calculates from log entries', (t) => { + const log = [ + { ts: 1700006400000, efficiencyWThs: 25 }, + { ts: 1700092800000, efficiencyWThs: 27 } + ] + + const summary = calculateEfficiencySummary(log) + t.is(summary.avgEfficiencyWThs, 26, 'should average efficiency') + t.pass() +}) + +test('calculateEfficiencySummary - handles empty log', (t) => { + const summary = calculateEfficiencySummary([]) + t.is(summary.avgEfficiencyWThs, null, 'should be null') + t.pass() +}) + +// ==================== Miner Status Tests ==================== + +test('sumObjectValues - sums keyed object values', (t) => { + t.is(sumObjectValues({ a: 5, b: 3, c: 2 }), 10, 'should sum all values') + t.is(sumObjectValues({}), 0, 'should return 0 for empty object') + t.is(sumObjectValues(null), 0, 'should return 0 for null') + t.is(sumObjectValues(undefined), 0, 'should return 0 for undefined') + t.is(sumObjectValues({ a: 'not_a_number', b: 5 }), 5, 'should skip non-numeric values') + t.pass() +}) + +test('getMinerStatus - happy path', async (t) => { + const dayTs = 1700006400000 + const mockCtx = { + conf: { + orks: [{ rpcPublicKey: 'key1' }] + }, + net_r0: { + jRequest: async () => { + return [{ + ts: dayTs, + type_cnt: { 'miner-am-s19xp': 60, 'miner-wm-m30sp': 40 }, + offline_cnt: { offl_hashboard: 5, offl_fan: 3 }, + power_mode_sleep_cnt: { sleep: 10 }, + maintenance_type_cnt: { repair: 2 } + }] + } + } + } + + const mockReq = { + query: { start: 1700000000000, end: 1700100000000 } + } + + const result = await getMinerStatus(mockCtx, mockReq) + t.ok(result.log, 'should return log array') + t.ok(result.summary, 'should return summary') + t.ok(Array.isArray(result.log), 'log should be array') + t.ok(result.log.length > 0, 'log should have entries') + t.is(result.log[0].offline, 8, 'should sum offline counts (5+3)') + 
t.is(result.log[0].sleep, 10, 'should sum sleep counts') + t.is(result.log[0].maintenance, 2, 'should sum maintenance counts') + t.is(result.log[0].online, 80, 'should derive online (100-8-10-2)') + t.pass() +}) + +test('getMinerStatus - missing start throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getMinerStatus(mockCtx, { query: { end: 1700100000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_MISSING_START_END', 'should throw missing start/end error') + } + t.pass() +}) + +test('getMinerStatus - invalid range throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getMinerStatus(mockCtx, { query: { start: 1700100000000, end: 1700000000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_INVALID_DATE_RANGE', 'should throw invalid range error') + } + t.pass() +}) + +test('getMinerStatus - empty ork results', async (t) => { + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, + net_r0: { jRequest: async () => ({}) } + } + + const result = await getMinerStatus(mockCtx, { query: { start: 1700000000000, end: 1700100000000 } }) + t.ok(result.log, 'should return log array') + t.is(result.log.length, 0, 'log should be empty with no data') + t.is(result.summary.avgOnline, null, 'avg online should be null') + t.is(result.summary.avgOffline, null, 'avg offline should be null') + t.pass() +}) + +test('processMinerStatusData - processes daily entries', (t) => { + const results = [[ + { + ts: 1700006400000, + type_cnt: { 'miner-am-s19xp': 60, 'miner-wm-m30sp': 40 }, + offline_cnt: { offl_hashboard: 5 }, + power_mode_sleep_cnt: { sleep: 10 }, + maintenance_type_cnt: { repair: 2 } + } + ]] + + const daily = processMinerStatusData(results) + t.ok(typeof daily === 'object', 'should return object') + const key = Object.keys(daily)[0] + 
t.is(daily[key].offline, 5, 'should extract offline count') + t.is(daily[key].sleep, 10, 'should extract sleep count') + t.is(daily[key].maintenance, 2, 'should extract maintenance count') + t.is(daily[key].online, 83, 'should derive online count (100-5-10-2)') + t.pass() +}) + +test('processMinerStatusData - handles error results', (t) => { + const results = [{ error: 'timeout' }] + const daily = processMinerStatusData(results) + t.ok(typeof daily === 'object', 'should return object') + t.is(Object.keys(daily).length, 0, 'should be empty for error results') + t.pass() +}) + +test('processMinerStatusData - aggregates multiple orks same day', (t) => { + const results = [ + [{ + ts: 1700006400000, + type_cnt: { 'miner-am-s19xp': 30, 'miner-wm-m30sp': 20 }, + offline_cnt: { offl_fan: 3 }, + power_mode_sleep_cnt: { sleep: 5 }, + maintenance_type_cnt: {} + }], + [{ + ts: 1700006400000, + type_cnt: { 'miner-am-s19xp': 30, 'miner-wm-m30sp': 20 }, + offline_cnt: { offl_hashboard: 2 }, + power_mode_sleep_cnt: {}, + maintenance_type_cnt: { repair: 1 } + }] + ] + + const daily = processMinerStatusData(results) + const key = Object.keys(daily)[0] + t.is(daily[key].offline, 5, 'should sum offline across orks (3+2)') + t.is(daily[key].sleep, 5, 'should sum sleep across orks') + t.is(daily[key].maintenance, 1, 'should sum maintenance across orks') + t.is(daily[key].online, 89, 'should derive total online (47+42)') + t.pass() +}) + +test('processMinerStatusData - handles entries with aggrFields wrapper', (t) => { + const results = [[ + { + ts: 1700006400000, + type_cnt: { 'miner-am-s19xp': 60, 'miner-wm-m30sp': 40 }, + aggrFields: { + offline_cnt: { offl_hashboard: 10 }, + power_mode_sleep_cnt: { sleep: 5 }, + maintenance_type_cnt: { repair: 3 } + } + } + ]] + + const daily = processMinerStatusData(results) + const key = Object.keys(daily)[0] + t.is(daily[key].offline, 10, 'should extract from aggrFields wrapper') + t.is(daily[key].sleep, 5, 'should extract sleep from aggrFields') 
+ t.is(daily[key].maintenance, 3, 'should extract maintenance from aggrFields') + t.pass() +}) + +test('calculateMinerStatusSummary - calculates from log entries', (t) => { + const log = [ + { ts: 1700006400000, online: 80, offline: 10, sleep: 5, maintenance: 5 }, + { ts: 1700092800000, online: 85, offline: 8, sleep: 4, maintenance: 3 } + ] + + const summary = calculateMinerStatusSummary(log) + t.is(summary.avgOnline, 82.5, 'should average online') + t.is(summary.avgOffline, 9, 'should average offline') + t.is(summary.avgSleep, 4.5, 'should average sleep') + t.is(summary.avgMaintenance, 4, 'should average maintenance') + t.pass() +}) + +test('calculateMinerStatusSummary - handles empty log', (t) => { + const summary = calculateMinerStatusSummary([]) + t.is(summary.avgOnline, null, 'should be null') + t.is(summary.avgOffline, null, 'should be null') + t.is(summary.avgSleep, null, 'should be null') + t.is(summary.avgMaintenance, null, 'should be null') + t.pass() +}) + +// ==================== Interval Utils Tests ==================== + +test('resolveInterval - auto-selects 1h for <= 2 days', (t) => { + const twoDays = 2 * 24 * 60 * 60 * 1000 + t.is(resolveInterval(0, twoDays, null), '1h', 'should select 1h for 2 day range') + t.is(resolveInterval(0, twoDays - 1, null), '1h', 'should select 1h for < 2 day range') + t.pass() +}) + +test('resolveInterval - auto-selects 1d for <= 90 days', (t) => { + const threeDays = 3 * 24 * 60 * 60 * 1000 + const ninetyDays = 90 * 24 * 60 * 60 * 1000 + t.is(resolveInterval(0, threeDays, null), '1d', 'should select 1d for 3 day range') + t.is(resolveInterval(0, ninetyDays, null), '1d', 'should select 1d for 90 day range') + t.pass() +}) + +test('resolveInterval - auto-selects 1w for > 90 days', (t) => { + const ninetyOneDays = 91 * 24 * 60 * 60 * 1000 + t.is(resolveInterval(0, ninetyOneDays, null), '1w', 'should select 1w for > 90 day range') + t.pass() +}) + +test('resolveInterval - uses requested interval when provided', (t) => { + 
t.is(resolveInterval(0, 1000, '1w'), '1w', 'should use requested interval') + t.is(resolveInterval(0, 999999999999, '1h'), '1h', 'should override auto with requested') + t.pass() +}) + +test('getIntervalConfig - returns correct configs', (t) => { + const h = getIntervalConfig('1h') + t.is(h.key, 'stat-3h', '1h key should be stat-3h') + t.is(h.groupRange, null, '1h should have no groupRange') + + const d = getIntervalConfig('1d') + t.is(d.key, 'stat-3h', '1d key should be stat-3h') + t.is(d.groupRange, '1D', '1d groupRange should be 1D') + + const w = getIntervalConfig('1w') + t.is(w.key, 'stat-3h', '1w key should be stat-3h') + t.is(w.groupRange, '1W', '1w groupRange should be 1W') + + t.pass() +}) + +// ==================== parseEntryTs Tests ==================== + +test('parseEntryTs - handles numeric ts', (t) => { + t.is(parseEntryTs(1700006400000), 1700006400000, 'should return number as-is') + t.pass() +}) + +test('parseEntryTs - handles range string ts', (t) => { + t.is(parseEntryTs('1770854400000-1771459199999'), 1770854400000, 'should extract start of range') + t.is(parseEntryTs('1771459200000-1771545599999'), 1771459200000, 'should extract start of range') + t.pass() +}) + +test('parseEntryTs - handles plain numeric string', (t) => { + t.is(parseEntryTs('1700006400000'), 1700006400000, 'should parse numeric string') + t.pass() +}) + +test('parseEntryTs - returns null for invalid input', (t) => { + t.is(parseEntryTs(null), null, 'null returns null') + t.is(parseEntryTs(undefined), null, 'undefined returns null') + t.pass() +}) + +// ==================== Power Mode Tests ==================== + +test('processPowerModeData - handles range string ts with groupRange', (t) => { + const results = [[{ + ts: '1700006400000-1700092799999', + power_mode_group_aggr: { 'cont1-miner1': 'normal' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + }]] + + const points = processPowerModeData(results, '1D') + t.ok(Object.keys(points).length > 0, 'should have entries 
despite range string ts') + const key = Object.keys(points)[0] + t.is(points[key].normal, 1, 'should count normal') + t.pass() +}) + +test('processTemperatureData - handles range string ts with groupRange', (t) => { + const results = [[{ + ts: '1700006400000-1700092799999', + temperature_c_group_max_aggr: { cont1: 65 }, + temperature_c_group_avg_aggr: { cont1: 55 } + }]] + + const points = processTemperatureData(results, '1D', null) + t.ok(Object.keys(points).length > 0, 'should have entries despite range string ts') + const key = Object.keys(points)[0] + t.is(points[key].containers.cont1.maxC, 65, 'should have temp data') + t.pass() +}) + +test('getPowerMode - happy path', async (t) => { + const ts = 1700006400000 + const mockCtx = { + conf: { + orks: [{ rpcPublicKey: 'key1' }] + }, + net_r0: { + jRequest: async () => { + return [{ + ts, + power_mode_group_aggr: { 'cont1-miner1': 'normal', 'cont1-miner2': 'low' }, + status_group_aggr: { 'cont1-miner1': 'mining', 'cont1-miner2': 'mining' } + }] + } + } + } + + const result = await getPowerMode(mockCtx, { + query: { start: 1700000000000, end: 1700100000000 } + }) + + t.ok(result.log, 'should return log array') + t.ok(result.summary, 'should return summary') + t.ok(Array.isArray(result.log), 'log should be array') + t.ok(result.log.length > 0, 'log should have entries') + t.is(result.log[0].normal, 1, 'should count normal miners') + t.is(result.log[0].low, 1, 'should count low miners') + t.pass() +}) + +test('getPowerMode - missing start/end throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getPowerMode(mockCtx, { query: { end: 1700100000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_MISSING_START_END', 'should throw missing start/end error') + } + t.pass() +}) + +test('getPowerMode - invalid range throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } 
+ } + + try { + await getPowerMode(mockCtx, { query: { start: 1700100000000, end: 1700000000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_INVALID_DATE_RANGE', 'should throw invalid range error') + } + t.pass() +}) + +test('getPowerMode - empty ork results', async (t) => { + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, + net_r0: { jRequest: async () => ({}) } + } + + const result = await getPowerMode(mockCtx, { + query: { start: 1700000000000, end: 1700100000000 } + }) + + t.ok(result.log, 'should return log array') + t.is(result.log.length, 0, 'log should be empty') + t.is(result.summary.avgNormal, null, 'avg should be null') + t.pass() +}) + +test('categorizeMiner - status overrides power mode', (t) => { + t.is(categorizeMiner('normal', 'offline'), 'offline', 'offline status should override') + t.is(categorizeMiner('high', 'error'), 'error', 'error status should override') + t.is(categorizeMiner('normal', 'maintenance'), 'maintenance', 'maintenance should override') + t.is(categorizeMiner('high', 'idle'), 'notMining', 'idle should map to notMining') + t.is(categorizeMiner('high', 'stopped'), 'notMining', 'stopped should map to notMining') + t.pass() +}) + +test('categorizeMiner - power mode categories', (t) => { + t.is(categorizeMiner('low', 'mining'), 'low', 'low mode with mining status') + t.is(categorizeMiner('high', 'mining'), 'high', 'high mode with mining status') + t.is(categorizeMiner('sleep', 'mining'), 'sleep', 'sleep mode with mining status') + t.is(categorizeMiner('normal', 'mining'), 'normal', 'normal mode with mining status') + t.is(categorizeMiner('normal', ''), 'normal', 'normal mode with empty status') + t.pass() +}) + +test('processPowerModeData - counts modes correctly', (t) => { + const results = [[{ + ts: 1700006400000, + power_mode_group_aggr: { + 'cont1-miner1': 'normal', + 'cont1-miner2': 'low', + 'cont1-miner3': 'high' + }, + status_group_aggr: { + 'cont1-miner1': 'mining', + 
'cont1-miner2': 'mining', + 'cont1-miner3': 'offline' + } + }]] + + const points = processPowerModeData(results, '1D') + const key = Object.keys(points)[0] + t.is(points[key].normal, 1, 'should count 1 normal') + t.is(points[key].low, 1, 'should count 1 low') + t.is(points[key].offline, 1, 'miner3 offline overrides high') + t.is(points[key].high, 0, 'miner3 classified as offline, not high') + t.pass() +}) + +test('processPowerModeData - handles error results', (t) => { + const results = [{ error: 'timeout' }] + const points = processPowerModeData(results, '1D') + t.is(Object.keys(points).length, 0, 'should be empty') + t.pass() +}) + +test('processPowerModeData - merges across multiple orks', (t) => { + const results = [ + [{ + ts: 1700006400000, + power_mode_group_aggr: { 'cont1-miner1': 'normal' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + }], + [{ + ts: 1700006400000, + power_mode_group_aggr: { 'cont2-miner1': 'low' }, + status_group_aggr: { 'cont2-miner1': 'mining' } + }] + ] + + const points = processPowerModeData(results, '1D') + const key = Object.keys(points)[0] + t.is(points[key].normal, 1, 'should count ork1 normal') + t.is(points[key].low, 1, 'should count ork2 low') + t.pass() +}) + +test('calculatePowerModeSummary - calculates averages', (t) => { + const log = [ + { ts: 1, low: 2, normal: 8, high: 0, sleep: 0, offline: 0, notMining: 0, maintenance: 0, error: 0 }, + { ts: 2, low: 4, normal: 6, high: 0, sleep: 0, offline: 0, notMining: 0, maintenance: 0, error: 0 } + ] + + const summary = calculatePowerModeSummary(log) + t.is(summary.avgLow, 3, 'should average low') + t.is(summary.avgNormal, 7, 'should average normal') + t.pass() +}) + +test('calculatePowerModeSummary - handles empty log', (t) => { + const summary = calculatePowerModeSummary([]) + t.is(summary.avgNormal, null, 'should be null') + t.is(summary.avgLow, null, 'should be null') + t.is(summary.avgOffline, null, 'should be null') + t.pass() +}) + +// ==================== Power Mode 
Timeline Tests ==================== + +test('getPowerModeTimeline - happy path', async (t) => { + const mockCtx = { + conf: { + orks: [{ rpcPublicKey: 'key1' }] + }, + net_r0: { + jRequest: async () => { + return [ + { + ts: 1700000000000, + power_mode_group_aggr: { 'cont1-miner1': 'normal' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + }, + { + ts: 1700010800000, + power_mode_group_aggr: { 'cont1-miner1': 'low' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + } + ] + } + } + } + + const result = await getPowerModeTimeline(mockCtx, { + query: { start: 1700000000000, end: 1700100000000 } + }) + + t.ok(result.log, 'should return log array') + t.ok(Array.isArray(result.log), 'log should be array') + t.ok(result.log.length > 0, 'log should have entries') + t.is(result.log[0].minerId, 'cont1-miner1', 'should have miner ID') + t.ok(result.log[0].segments.length > 0, 'should have segments') + t.pass() +}) + +test('getPowerModeTimeline - default start/end', async (t) => { + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, + net_r0: { jRequest: async () => ([]) } + } + + const result = await getPowerModeTimeline(mockCtx, { query: {} }) + t.ok(result.log, 'should return log with defaults') + t.ok(Array.isArray(result.log), 'should be array') + t.pass() +}) + +test('getPowerModeTimeline - invalid range throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getPowerModeTimeline(mockCtx, { query: { start: 1700100000000, end: 1700000000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_INVALID_DATE_RANGE', 'should throw invalid range error') + } + t.pass() +}) + +test('getPowerModeTimeline - empty results', async (t) => { + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, + net_r0: { jRequest: async () => ({}) } + } + + const result = await getPowerModeTimeline(mockCtx, { + query: { start: 1700000000000, end: 1700100000000 } + }) + 
+ t.is(result.log.length, 0, 'should be empty') + t.pass() +}) + +test('processPowerModeTimelineData - groups by miner and sorts by ts', (t) => { + const results = [[ + { + ts: 1700010800000, + power_mode_group_aggr: { 'cont1-miner1': 'low' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + }, + { + ts: 1700000000000, + power_mode_group_aggr: { 'cont1-miner1': 'normal' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + } + ]] + + const log = processPowerModeTimelineData(results, null) + t.is(log.length, 1, 'should group into 1 miner') + t.is(log[0].minerId, 'cont1-miner1', 'should have correct miner id') + t.is(log[0].segments[0].powerMode, 'normal', 'first segment should be earlier entry (normal)') + t.is(log[0].segments[1].powerMode, 'low', 'second segment should be later entry (low)') + t.pass() +}) + +test('processPowerModeTimelineData - merges consecutive same-mode segments', (t) => { + const results = [[ + { + ts: 1700000000000, + power_mode_group_aggr: { 'cont1-miner1': 'normal' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + }, + { + ts: 1700010800000, + power_mode_group_aggr: { 'cont1-miner1': 'normal' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + }, + { + ts: 1700021600000, + power_mode_group_aggr: { 'cont1-miner1': 'normal' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + } + ]] + + const log = processPowerModeTimelineData(results, null) + t.is(log[0].segments.length, 1, 'should merge 3 entries into 1 segment') + t.is(log[0].segments[0].from, 1700000000000, 'segment should start at first entry') + t.is(log[0].segments[0].to, 1700021600000, 'segment should end at last entry') + t.pass() +}) + +test('processPowerModeTimelineData - mode changes create new segments', (t) => { + const results = [[ + { + ts: 1700000000000, + power_mode_group_aggr: { 'cont1-miner1': 'normal' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + }, + { + ts: 1700010800000, + power_mode_group_aggr: { 'cont1-miner1': 'low' }, + status_group_aggr: { 
'cont1-miner1': 'mining' } + }, + { + ts: 1700021600000, + power_mode_group_aggr: { 'cont1-miner1': 'normal' }, + status_group_aggr: { 'cont1-miner1': 'mining' } + } + ]] + + const log = processPowerModeTimelineData(results, null) + t.is(log[0].segments.length, 3, 'should create 3 separate segments') + t.is(log[0].segments[0].powerMode, 'normal', 'first segment normal') + t.is(log[0].segments[1].powerMode, 'low', 'second segment low') + t.is(log[0].segments[2].powerMode, 'normal', 'third segment normal') + t.pass() +}) + +test('processPowerModeTimelineData - extracts container from miner id', (t) => { + const results = [[ + { + ts: 1700000000000, + power_mode_group_aggr: { 'container-a-pos1-miner1': 'normal' }, + status_group_aggr: { 'container-a-pos1-miner1': 'mining' } + } + ]] + + const log = processPowerModeTimelineData(results, null) + t.is(log[0].container, 'container-a-pos1', 'should extract container from miner id') + t.pass() +}) + +test('getPowerModeTimeline - always uses t-miner tag', async (t) => { + let capturedPayload = null + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, + net_r0: { + jRequest: async (key, method, payload) => { + capturedPayload = payload + return [] + } + } + } + + await getPowerModeTimeline(mockCtx, { + query: { start: 1700000000000, end: 1700100000000, container: 'my-container' } + }) + + t.is(capturedPayload.tag, 't-miner', 'should always use t-miner tag for RPC') + t.pass() +}) + +test('processPowerModeTimelineData - filters by container post-RPC', (t) => { + const results = [[ + { + ts: 1700000000000, + power_mode_group_aggr: { 'cont1-miner1': 'normal', 'cont2-miner1': 'low' }, + status_group_aggr: { 'cont1-miner1': 'mining', 'cont2-miner1': 'mining' } + } + ]] + + const log = processPowerModeTimelineData(results, 'cont1') + t.is(log.length, 1, 'should only include miners from cont1') + t.is(log[0].container, 'cont1', 'should be cont1') + t.pass() +}) + +// ==================== Temperature Tests 
==================== + +test('getTemperature - happy path', async (t) => { + const ts = 1700006400000 + const mockCtx = { + conf: { + orks: [{ rpcPublicKey: 'key1' }] + }, + net_r0: { + jRequest: async () => { + return [{ + ts, + temperature_c_group_max_aggr: { container1: 65, container2: 72 }, + temperature_c_group_avg_aggr: { container1: 55, container2: 60 } + }] + } + } + } + + const result = await getTemperature(mockCtx, { + query: { start: 1700000000000, end: 1700100000000 } + }) + + t.ok(result.log, 'should return log array') + t.ok(result.summary, 'should return summary') + t.ok(Array.isArray(result.log), 'log should be array') + t.ok(result.log.length > 0, 'log should have entries') + t.ok(result.log[0].containers, 'should have containers object') + t.is(result.log[0].containers.container1.maxC, 65, 'should have container1 max temp') + t.is(result.log[0].containers.container2.avgC, 60, 'should have container2 avg temp') + t.is(result.log[0].siteMaxC, 72, 'should have site max temp') + t.ok(result.summary.peakTemp !== null, 'should have peak temp') + t.pass() +}) + +test('getTemperature - missing start/end throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getTemperature(mockCtx, { query: { end: 1700100000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_MISSING_START_END', 'should throw missing start/end error') + } + t.pass() +}) + +test('getTemperature - invalid range throws', async (t) => { + const mockCtx = { + conf: { orks: [] }, + net_r0: { jRequest: async () => ({}) } + } + + try { + await getTemperature(mockCtx, { query: { start: 1700100000000, end: 1700000000000 } }) + t.fail('should have thrown') + } catch (err) { + t.is(err.message, 'ERR_INVALID_DATE_RANGE', 'should throw invalid range error') + } + t.pass() +}) + +test('getTemperature - empty ork results', async (t) => { + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, 
+ net_r0: { jRequest: async () => ({}) } + } + + const result = await getTemperature(mockCtx, { + query: { start: 1700000000000, end: 1700100000000 } + }) + + t.ok(result.log, 'should return log array') + t.is(result.log.length, 0, 'log should be empty') + t.is(result.summary.avgMaxTemp, null, 'avg max should be null') + t.is(result.summary.avgAvgTemp, null, 'avg avg should be null') + t.is(result.summary.peakTemp, null, 'peak should be null') + t.pass() +}) + +test('processTemperatureData - extracts per-container temps', (t) => { + const results = [[{ + ts: 1700006400000, + temperature_c_group_max_aggr: { cont1: 65, cont2: 72 }, + temperature_c_group_avg_aggr: { cont1: 55, cont2: 60 } + }]] + + const points = processTemperatureData(results, '1D', null) + const key = Object.keys(points)[0] + t.is(points[key].containers.cont1.maxC, 65, 'should have cont1 max') + t.is(points[key].containers.cont2.maxC, 72, 'should have cont2 max') + t.is(points[key].containers.cont1.avgC, 55, 'should have cont1 avg') + t.is(points[key].containers.cont2.avgC, 60, 'should have cont2 avg') + t.pass() +}) + +test('processTemperatureData - calculates site-wide aggregates', (t) => { + const results = [[{ + ts: 1700006400000, + temperature_c_group_max_aggr: { cont1: 65, cont2: 72 }, + temperature_c_group_avg_aggr: { cont1: 55, cont2: 60 } + }]] + + const points = processTemperatureData(results, '1D', null) + const key = Object.keys(points)[0] + t.is(points[key].siteMaxC, 72, 'site max should be highest container max') + t.is(points[key].siteAvgC, 57.5, 'site avg should average container avgs') + t.pass() +}) + +test('processTemperatureData - filters by container', (t) => { + const results = [[{ + ts: 1700006400000, + temperature_c_group_max_aggr: { cont1: 65, cont2: 72 }, + temperature_c_group_avg_aggr: { cont1: 55, cont2: 60 } + }]] + + const points = processTemperatureData(results, '1D', 'cont1') + const key = Object.keys(points)[0] + t.ok(points[key].containers.cont1, 'should have 
cont1') + t.ok(!points[key].containers.cont2, 'should not have cont2') + t.is(points[key].siteMaxC, 65, 'site max should be cont1 max') + t.pass() +}) + +test('processTemperatureData - handles error results', (t) => { + const results = [{ error: 'timeout' }] + const points = processTemperatureData(results, '1D', null) + t.is(Object.keys(points).length, 0, 'should be empty') + t.pass() +}) + +test('calculateTemperatureSummary - calculates averages and peak', (t) => { + const log = [ + { ts: 1, containers: {}, siteMaxC: 70, siteAvgC: 55 }, + { ts: 2, containers: {}, siteMaxC: 75, siteAvgC: 60 } + ] + + const summary = calculateTemperatureSummary(log) + t.is(summary.avgMaxTemp, 72.5, 'should average max temps') + t.is(summary.avgAvgTemp, 57.5, 'should average avg temps') + t.is(summary.peakTemp, 75, 'should find peak temp') + t.pass() +}) + +test('calculateTemperatureSummary - handles empty log', (t) => { + const summary = calculateTemperatureSummary([]) + t.is(summary.avgMaxTemp, null, 'should be null') + t.is(summary.avgAvgTemp, null, 'should be null') + t.is(summary.peakTemp, null, 'should be null') + t.pass() +}) + +test('getTemperature - always uses t-miner tag with container post-filter', async (t) => { + let capturedPayload = null + const mockCtx = { + conf: { orks: [{ rpcPublicKey: 'key1' }] }, + net_r0: { + jRequest: async (key, method, payload) => { + capturedPayload = payload + return [] + } + } + } + + await getTemperature(mockCtx, { + query: { start: 1700000000000, end: 1700100000000, container: 'my-container' } + }) + + t.is(capturedPayload.tag, 't-miner', 'should always use t-miner tag for RPC') + t.pass() +}) diff --git a/tests/unit/routes/metrics.routes.test.js b/tests/unit/routes/metrics.routes.test.js new file mode 100644 index 0000000..198d798 --- /dev/null +++ b/tests/unit/routes/metrics.routes.test.js @@ -0,0 +1,63 @@ +'use strict' + +const test = require('brittle') +const { testModuleStructure, testHandlerFunctions, testOnRequestFunctions } = 
require('../helpers/routeTestHelpers') +const { createRoutesForTest } = require('../helpers/mockHelpers') + +const ROUTES_PATH = '../../../workers/lib/server/routes/metrics.routes.js' + +test('metrics routes - module structure', (t) => { + testModuleStructure(t, ROUTES_PATH, 'metrics') + t.pass() +}) + +test('metrics routes - route definitions', (t) => { + const routes = createRoutesForTest(ROUTES_PATH) + + const routeUrls = routes.map(route => route.url) + t.ok(routeUrls.includes('/auth/metrics/hashrate'), 'should have hashrate route') + t.ok(routeUrls.includes('/auth/metrics/consumption'), 'should have consumption route') + t.ok(routeUrls.includes('/auth/metrics/efficiency'), 'should have efficiency route') + t.ok(routeUrls.includes('/auth/metrics/miner-status'), 'should have miner-status route') + t.ok(routeUrls.includes('/auth/metrics/power-mode'), 'should have power-mode route') + t.ok(routeUrls.includes('/auth/metrics/power-mode/timeline'), 'should have power-mode/timeline route') + t.ok(routeUrls.includes('/auth/metrics/temperature'), 'should have temperature route') + + t.pass() +}) + +test('metrics routes - HTTP methods', (t) => { + const routes = createRoutesForTest(ROUTES_PATH) + + routes.forEach(route => { + t.is(route.method, 'GET', `route ${route.url} should be GET`) + }) + + t.pass() +}) + +test('metrics routes - schema integration', (t) => { + const routes = createRoutesForTest(ROUTES_PATH) + + const routesWithSchemas = routes.filter(route => route.schema) + routesWithSchemas.forEach(route => { + t.ok(route.schema, `route ${route.url} should have schema`) + if (route.schema.querystring) { + t.ok(typeof route.schema.querystring === 'object', `route ${route.url} querystring should be object`) + } + }) + + t.pass() +}) + +test('metrics routes - handler functions', (t) => { + const routes = createRoutesForTest(ROUTES_PATH) + testHandlerFunctions(t, routes, 'metrics') + t.pass() +}) + +test('metrics routes - onRequest functions', (t) => { + const routes 
= createRoutesForTest(ROUTES_PATH) + testOnRequestFunctions(t, routes, 'metrics') + t.pass() +}) diff --git a/workers/lib/constants.js b/workers/lib/constants.js index 954b37e..030835e 100644 --- a/workers/lib/constants.js +++ b/workers/lib/constants.js @@ -132,7 +132,16 @@ const ENDPOINTS = { POOL_MANAGER_ASSIGN: '/auth/pool-manager/miners/assign', POOL_MANAGER_POWER_MODE: '/auth/pool-manager/miners/power-mode', - SITE_STATUS_LIVE: '/auth/site/status/live' + SITE_STATUS_LIVE: '/auth/site/status/live', + + // Metrics endpoints + METRICS_HASHRATE: '/auth/metrics/hashrate', + METRICS_CONSUMPTION: '/auth/metrics/consumption', + METRICS_EFFICIENCY: '/auth/metrics/efficiency', + METRICS_MINER_STATUS: '/auth/metrics/miner-status', + METRICS_POWER_MODE: '/auth/metrics/power-mode', + METRICS_POWER_MODE_TIMELINE: '/auth/metrics/power-mode/timeline', + METRICS_TEMPERATURE: '/auth/metrics/temperature' } const HTTP_METHODS = { @@ -232,7 +241,16 @@ const AGGR_FIELDS = { SITE_POWER: 'site_power_w', ENERGY_AGGR: 'energy_aggr', ACTIVE_ENERGY_IN: 'active_energy_in_aggr', - UTE_ENERGY: 'ute_energy_aggr' + UTE_ENERGY: 'ute_energy_aggr', + EFFICIENCY: 'efficiency_w_ths_avg_aggr', + POWER_MODE_GROUP: 'power_mode_group_aggr', + STATUS_GROUP: 'status_group_aggr', + TEMP_MAX: 'temperature_c_group_max_aggr', + TEMP_AVG: 'temperature_c_group_avg_aggr', + TYPE_CNT: 'type_cnt', + OFFLINE_CNT: 'offline_cnt', + SLEEP_CNT: 'power_mode_sleep_cnt', + MAINTENANCE_CNT: 'maintenance_type_cnt' } const PERIOD_TYPES = { diff --git a/workers/lib/server/handlers/metrics.handlers.js b/workers/lib/server/handlers/metrics.handlers.js new file mode 100644 index 0000000..406827c --- /dev/null +++ b/workers/lib/server/handlers/metrics.handlers.js @@ -0,0 +1,691 @@ +'use strict' + +const { + WORKER_TYPES, + AGGR_FIELDS, + RPC_METHODS +} = require('../../constants') +const { + requestRpcEachLimit, + getStartOfDay, + safeDiv +} = require('../../utils') + +const TWO_DAYS_MS = 2 * 24 * 60 * 60 * 1000 +const 
const NINETY_DAYS_MS = 90 * 24 * 60 * 60 * 1000
const THREE_HOURS_MS = 3 * 60 * 60 * 1000
const ONE_MONTH_MS = 30 * 24 * 60 * 60 * 1000
const DEFAULT_TIMELINE_LIMIT = 10080

// ==================== Shared Utilities ====================

/**
 * Parse a timestamp from an RPC entry.
 * With groupRange, `ts` may be a range string like "1770854400000-1771459199999";
 * in that case the start of the range is used.
 *
 * @param {number|string|*} ts - raw timestamp value from an RPC entry
 * @returns {number|null} epoch milliseconds, or null when unparseable
 *   (normalized from NaN so callers can uniformly guard with `if (!ts)`)
 */
function parseEntryTs (ts) {
  if (typeof ts === 'number') {
    return Number.isFinite(ts) ? ts : null
  }
  if (typeof ts === 'string') {
    const dashIdx = ts.indexOf('-')
    const parsed = dashIdx > 0 ? Number(ts.slice(0, dashIdx)) : Number(ts)
    return Number.isFinite(parsed) ? parsed : null
  }
  return null
}

/**
 * Validate and coerce the `start`/`end` query params (epoch ms).
 *
 * @param {object} req - request with `query.start` and `query.end`
 * @returns {{start: number, end: number}}
 * @throws {Error} ERR_MISSING_START_END when either bound is absent (or 0/NaN)
 * @throws {Error} ERR_INVALID_DATE_RANGE when start >= end
 */
function validateStartEnd (req) {
  const start = Number(req.query.start)
  const end = Number(req.query.end)

  // A literal 0 (or NaN) is treated as missing; acceptable because real
  // metric queries always use recent epoch-ms timestamps.
  if (!start || !end) {
    throw new Error('ERR_MISSING_START_END')
  }

  if (start >= end) {
    throw new Error('ERR_INVALID_DATE_RANGE')
  }

  return { start, end }
}

/**
 * Yield every usable data entry from a list of per-ork RPC results.
 * Skips failed results (falsy or carrying `.error`) and non-array payloads.
 * Accepts results shaped as a bare array, or wrapped as `{ data }` / `{ result }`.
 */
function * iterateRpcEntries (results) {
  for (const res of results) {
    if (!res || res.error) continue
    const data = Array.isArray(res) ? res : (res.data || res.result || [])
    if (!Array.isArray(data)) continue
    for (const entry of data) {
      if (!entry || entry.error) continue
      yield entry
    }
  }
}

/**
 * Invoke `callback(dayTs, value)` for each item of a range-aggregation entry.
 * Supports both array payloads (`[{ ts, val }, ...]`) and keyed objects
 * (`{ "<ts or ts-range>": val, ... }`). Timestamps are normalized to the
 * start of their day via getStartOfDay; unparseable timestamps are skipped.
 */
function forEachRangeAggrItem (entry, callback) {
  const items = entry.data || entry.items || entry
  if (Array.isArray(items)) {
    for (const item of items) {
      const ts = getStartOfDay(parseEntryTs(item.ts || item.timestamp))
      if (!ts) continue
      callback(ts, item.val || item)
    }
  } else if (typeof items === 'object') {
    for (const [key, val] of Object.entries(items)) {
      // BUGFIX: keys may be range strings ("start-end"); coercing with
      // Number(key) first turned those into NaN and silently dropped the
      // data point. parseEntryTs already handles both plain and range keys.
      const ts = getStartOfDay(parseEntryTs(key))
      if (!ts) continue
      callback(ts, val)
    }
  }
}

/**
 * Sum the numeric values of an object; non-numeric values count as 0.
 * Returns 0 for null/undefined/non-object input.
 */
function sumObjectValues (obj) {
  if (!obj || typeof obj !== 'object') return 0
  return Object.values(obj).reduce((sum, val) => sum + (Number(val) || 0), 0)
}
/**
 * Extract the container name from a miner ID by dropping the final
 * dash-separated segment (assumed to be a position/index).
 * e.g. "bitdeer-9a-miner1" -> "bitdeer-9a"
 * NOTE: Unverified against real power_mode_group_aggr data.
 */
function extractContainerFromMinerId (minerId) {
  const cut = minerId.lastIndexOf('-')
  if (cut <= 0) return minerId
  return minerId.slice(0, cut)
}

// ==================== Shared Interval Utils ====================

/**
 * Pick a chart interval for the given window unless the caller requested one:
 * up to 2 days -> hourly, up to 90 days -> daily, otherwise weekly.
 */
function resolveInterval (start, end, requested) {
  if (requested) return requested
  const span = end - start
  if (span <= 2 * 24 * 60 * 60 * 1000) return '1h'
  return span <= 90 * 24 * 60 * 60 * 1000 ? '1d' : '1w'
}

/**
 * Map an interval token onto the tail-log query shape: the stat key,
 * an optional server-side groupRange, and the bucket width in ms used
 * to derive the RPC `limit`. Unknown intervals fall back to daily.
 */
function getIntervalConfig (interval) {
  const dayMs = 24 * 60 * 60 * 1000
  if (interval === '1h') {
    return { key: 'stat-3h', groupRange: null, divisorMs: 3 * 60 * 60 * 1000 }
  }
  if (interval === '1w') {
    return { key: 'stat-3h', groupRange: '1W', divisorMs: 7 * dayMs }
  }
  // '1d' and any unrecognized value share the daily configuration.
  return { key: 'stat-3h', groupRange: '1D', divisorMs: dayMs }
}

// ==================== Hashrate ====================

/**
 * GET hashrate history: daily aggregated miner hashrate between
 * `start` and `end` (epoch ms), plus a summary.
 */
async function getHashrate (ctx, req) {
  const { start, end } = validateStartEnd(req)

  const results = await requestRpcEachLimit(ctx, RPC_METHODS.TAIL_LOG_RANGE_AGGR, {
    keys: [{
      type: WORKER_TYPES.MINER,
      startDate: new Date(start).toISOString(),
      endDate: new Date(end).toISOString(),
      fields: { [AGGR_FIELDS.HASHRATE_SUM]: 1 },
      shouldReturnDailyData: 1
    }]
  })

  const perDay = processHashrateData(results)
  const log = Object.keys(perDay)
    .sort()
    .map((day) => ({ ts: Number(day), hashrateMhs: perDay[day] }))

  return { log, summary: calculateHashrateSummary(log) }
}

/**
 * Fold raw RPC results into a map of day-start ts -> summed hashrate (MH/s).
 */
function processHashrateData (results) {
  const perDay = {}
  for (const entry of iterateRpcEntries(results)) {
    forEachRangeAggrItem(entry, (ts, val) => {
      let amount
      if (typeof val === 'object') {
        amount = val[AGGR_FIELDS.HASHRATE_SUM] || 0
      } else {
        amount = Number(val) || 0
      }
      perDay[ts] = (perDay[ts] || 0) + amount
    })
  }
  return perDay
}
/**
 * Summarize a hashrate log: average and total MH/s across entries.
 * Empty logs yield a null average and zero total.
 */
function calculateHashrateSummary (log) {
  if (log.length === 0) {
    return { avgHashrateMhs: null, totalHashrateMhs: 0 }
  }

  let total = 0
  for (const entry of log) {
    total += entry.hashrateMhs || 0
  }

  return {
    avgHashrateMhs: safeDiv(total, log.length),
    totalHashrateMhs: total
  }
}

// ==================== Consumption ====================

/**
 * GET power consumption history: daily aggregated powermeter site power
 * between `start` and `end` (epoch ms), plus derived MWh and a summary.
 */
async function getConsumption (ctx, req) {
  const { start, end } = validateStartEnd(req)

  const results = await requestRpcEachLimit(ctx, RPC_METHODS.TAIL_LOG_RANGE_AGGR, {
    keys: [{
      type: WORKER_TYPES.POWERMETER,
      startDate: new Date(start).toISOString(),
      endDate: new Date(end).toISOString(),
      fields: { [AGGR_FIELDS.SITE_POWER]: 1 },
      shouldReturnDailyData: 1
    }]
  })

  const perDay = processConsumptionData(results)
  const log = Object.keys(perDay).sort().map((day) => {
    const powerW = perDay[day]
    return {
      ts: Number(day),
      powerW,
      // W sustained for 24h, converted to MWh.
      consumptionMWh: (powerW * 24) / 1000000
    }
  })

  return { log, summary: calculateConsumptionSummary(log) }
}

/**
 * Fold raw RPC results into a map of day-start ts -> summed site power (W).
 */
function processConsumptionData (results) {
  const perDay = {}
  for (const entry of iterateRpcEntries(results)) {
    forEachRangeAggrItem(entry, (ts, val) => {
      const watts = typeof val === 'object'
        ? (val[AGGR_FIELDS.SITE_POWER] || 0)
        : (Number(val) || 0)
      perDay[ts] = (perDay[ts] || 0) + watts
    })
  }
  return perDay
}
/**
 * Summarize a consumption log: average power (W) and total consumption (MWh).
 * Empty logs yield a null average and zero total.
 */
function calculateConsumptionSummary (log) {
  if (log.length === 0) {
    return { avgPowerW: null, totalConsumptionMWh: 0 }
  }

  let powerSum = 0
  let energySum = 0
  for (const entry of log) {
    powerSum += entry.powerW || 0
    energySum += entry.consumptionMWh || 0
  }

  return {
    avgPowerW: safeDiv(powerSum, log.length),
    totalConsumptionMWh: energySum
  }
}

// ==================== Efficiency ====================

/**
 * GET efficiency history: daily averaged miner efficiency (W/TH/s)
 * between `start` and `end` (epoch ms), plus a summary.
 */
async function getEfficiency (ctx, req) {
  const { start, end } = validateStartEnd(req)

  const results = await requestRpcEachLimit(ctx, RPC_METHODS.TAIL_LOG_RANGE_AGGR, {
    keys: [{
      type: WORKER_TYPES.MINER,
      startDate: new Date(start).toISOString(),
      endDate: new Date(end).toISOString(),
      fields: { [AGGR_FIELDS.EFFICIENCY]: 1 },
      shouldReturnDailyData: 1
    }]
  })

  const perDay = processEfficiencyData(results)
  const log = Object.keys(perDay).sort().map((day) => ({
    ts: Number(day),
    // count is always >= 1: buckets are only created for non-zero samples.
    efficiencyWThs: perDay[day].total / perDay[day].count
  }))

  return { log, summary: calculateEfficiencySummary(log) }
}

/**
 * Fold raw RPC results into day-start ts -> { total, count } accumulators.
 * Zero/missing efficiency samples are ignored so they don't skew the mean.
 */
function processEfficiencyData (results) {
  const perDay = {}
  for (const entry of iterateRpcEntries(results)) {
    forEachRangeAggrItem(entry, (ts, val) => {
      const eff = typeof val === 'object'
        ? (val[AGGR_FIELDS.EFFICIENCY] || 0)
        : (Number(val) || 0)
      if (!eff) return
      const bucket = perDay[ts] || (perDay[ts] = { total: 0, count: 0 })
      bucket.total += eff
      bucket.count += 1
    })
  }
  return perDay
}
/**
 * Summarize an efficiency log: average W/TH/s across entries, or null
 * for an empty log.
 */
function calculateEfficiencySummary (log) {
  if (log.length === 0) {
    return { avgEfficiencyWThs: null }
  }

  let total = 0
  for (const entry of log) {
    total += entry.efficiencyWThs || 0
  }

  return { avgEfficiencyWThs: safeDiv(total, log.length) }
}

// ==================== Miner Status ====================

/**
 * GET miner status history: daily counts of online/offline/sleep/maintenance
 * miners between `start` and `end` (epoch ms), plus a summary.
 */
async function getMinerStatus (ctx, req) {
  const { start, end } = validateStartEnd(req)

  const results = await requestRpcEachLimit(ctx, RPC_METHODS.TAIL_LOG, {
    key: 'stat-3h',
    type: WORKER_TYPES.MINER,
    tag: 't-miner',
    aggrFields: {
      [AGGR_FIELDS.TYPE_CNT]: 1,
      [AGGR_FIELDS.OFFLINE_CNT]: 1,
      [AGGR_FIELDS.SLEEP_CNT]: 1,
      [AGGR_FIELDS.MAINTENANCE_CNT]: 1
    },
    groupRange: '1D',
    shouldCalculateAvg: true,
    // One 3h bucket per slot in the requested window.
    limit: Math.ceil((end - start) / THREE_HOURS_MS)
  })

  const perDay = processMinerStatusData(results)
  const log = Object.keys(perDay).sort().map((day) => ({
    ts: Number(day),
    ...perDay[day]
  }))

  return { log, summary: calculateMinerStatusSummary(log) }
}

/**
 * Fold raw RPC results into day-start ts -> status-count buckets.
 * Online is derived as total minus offline/sleep/maintenance, floored at 0.
 */
function processMinerStatusData (results) {
  const perDay = {}
  for (const entry of iterateRpcEntries(results)) {
    const rawTs = parseEntryTs(entry.ts || entry.timestamp)
    if (!rawTs) continue
    const ts = getStartOfDay(rawTs)
    if (!ts) continue

    const bucket = perDay[ts] || (perDay[ts] = { online: 0, offline: 0, sleep: 0, maintenance: 0 })

    // Counts may live at the top level or nested under aggrFields.
    const countFor = (field) => sumObjectValues(entry[field] || entry.aggrFields?.[field])
    const offline = countFor(AGGR_FIELDS.OFFLINE_CNT)
    const sleep = countFor(AGGR_FIELDS.SLEEP_CNT)
    const maintenance = countFor(AGGR_FIELDS.MAINTENANCE_CNT)

    bucket.offline += offline
    bucket.sleep += sleep
    bucket.maintenance += maintenance

    const total = sumObjectValues(entry[AGGR_FIELDS.TYPE_CNT]) || entry.total_cnt || entry.count || 0
    if (total > 0) {
      bucket.online += Math.max(0, total - offline - sleep - maintenance)
    }
  }
  return perDay
}
/**
 * Summarize a miner-status log: per-category averages across entries,
 * or all-null averages for an empty log.
 */
function calculateMinerStatusSummary (log) {
  if (log.length === 0) {
    return { avgOnline: null, avgOffline: null, avgSleep: null, avgMaintenance: null }
  }

  let online = 0
  let offline = 0
  let sleep = 0
  let maintenance = 0
  for (const entry of log) {
    online += entry.online || 0
    offline += entry.offline || 0
    sleep += entry.sleep || 0
    maintenance += entry.maintenance || 0
  }

  return {
    avgOnline: safeDiv(online, log.length),
    avgOffline: safeDiv(offline, log.length),
    avgSleep: safeDiv(sleep, log.length),
    avgMaintenance: safeDiv(maintenance, log.length)
  }
}

// ==================== Power Mode ====================

/**
 * GET power-mode distribution history: per-interval counts of miners in
 * each power-mode/status category between `start` and `end` (epoch ms).
 * Interval resolves from the window size unless `interval` is provided.
 */
async function getPowerMode (ctx, req) {
  const { start, end } = validateStartEnd(req)

  const config = getIntervalConfig(resolveInterval(start, end, req.query.interval))

  const rpcPayload = {
    key: config.key,
    type: WORKER_TYPES.MINER,
    tag: 't-miner',
    aggrFields: {
      [AGGR_FIELDS.POWER_MODE_GROUP]: 1,
      [AGGR_FIELDS.STATUS_GROUP]: 1
    },
    shouldCalculateAvg: true,
    limit: Math.ceil((end - start) / config.divisorMs)
  }
  // Hourly interval queries raw buckets; coarser intervals group server-side.
  if (config.groupRange) {
    rpcPayload.groupRange = config.groupRange
  }

  const results = await requestRpcEachLimit(ctx, RPC_METHODS.TAIL_LOG, rpcPayload)

  const timePoints = processPowerModeData(results, config.groupRange)
  const log = Object.keys(timePoints).sort().map((ts) => ({
    ts: Number(ts),
    ...timePoints[ts]
  }))

  return { log, summary: calculatePowerModeSummary(log) }
}
/**
 * Map a miner's (powerMode, status) pair onto a single chart category.
 * Status takes precedence over power mode; anything unrecognized is 'normal'.
 */
function categorizeMiner (powerMode, status) {
  switch (status) {
    case 'offline':
    case 'error':
      return status
    case 'maintenance':
      return 'maintenance'
    case 'idle':
    case 'stopped':
      return 'notMining'
  }
  switch (powerMode) {
    case 'low':
    case 'high':
    case 'sleep':
      return powerMode
  }
  return 'normal'
}

/**
 * Fold raw RPC results into ts -> category-count points. When groupRange is
 * set, timestamps are collapsed to the start of their day.
 */
function processPowerModeData (results, groupRange) {
  const timePoints = {}

  for (const entry of iterateRpcEntries(results)) {
    const rawTs = parseEntryTs(entry.ts || entry.timestamp)
    const ts = groupRange && rawTs ? getStartOfDay(rawTs) : rawTs
    if (!ts) continue

    if (!timePoints[ts]) {
      timePoints[ts] = { low: 0, normal: 0, high: 0, sleep: 0, offline: 0, notMining: 0, maintenance: 0, error: 0 }
    }

    const modes = entry[AGGR_FIELDS.POWER_MODE_GROUP] || entry.aggrFields?.[AGGR_FIELDS.POWER_MODE_GROUP] || {}
    const statuses = entry[AGGR_FIELDS.STATUS_GROUP] || entry.aggrFields?.[AGGR_FIELDS.STATUS_GROUP] || {}

    if (typeof modes !== 'object' || modes === null) continue
    for (const [minerId, mode] of Object.entries(modes)) {
      const bucket = categorizeMiner(mode, statuses[minerId] || '')
      timePoints[ts][bucket] = (timePoints[ts][bucket] || 0) + 1
    }
  }
  return timePoints
}
/**
 * Summarize a power-mode log: per-category averages (avgLow, avgNormal, ...)
 * across entries, or all-null averages for an empty log.
 */
function calculatePowerModeSummary (log) {
  const categories = ['low', 'normal', 'high', 'sleep', 'offline', 'notMining', 'maintenance', 'error']
  const keyFor = (cat) => 'avg' + cat[0].toUpperCase() + cat.slice(1)
  const summary = {}

  if (log.length === 0) {
    for (const cat of categories) {
      summary[keyFor(cat)] = null
    }
    return summary
  }

  for (const cat of categories) {
    let total = 0
    for (const entry of log) {
      total += entry[cat] || 0
    }
    summary[keyFor(cat)] = safeDiv(total, log.length)
  }
  return summary
}

// ==================== Power Mode Timeline ====================

/**
 * GET per-miner power-mode timeline segments. Defaults to the last month
 * when `start`/`end` are omitted; optional `container` filters post-RPC.
 *
 * @throws {Error} ERR_INVALID_DATE_RANGE when start >= end
 */
async function getPowerModeTimeline (ctx, req) {
  const now = Date.now()
  const start = Number(req.query.start) || (now - ONE_MONTH_MS)
  const end = Number(req.query.end) || now

  if (start >= end) {
    throw new Error('ERR_INVALID_DATE_RANGE')
  }

  const results = await requestRpcEachLimit(ctx, RPC_METHODS.TAIL_LOG, {
    key: 'stat-3h',
    type: WORKER_TYPES.MINER,
    // Always query the site-wide miner tag; container filtering happens
    // after the RPC in processPowerModeTimelineData.
    tag: 't-miner',
    aggrFields: {
      [AGGR_FIELDS.POWER_MODE_GROUP]: 1,
      [AGGR_FIELDS.STATUS_GROUP]: 1
    },
    limit: Number(req.query.limit) || DEFAULT_TIMELINE_LIMIT
  })

  return { log: processPowerModeTimelineData(results, req.query.container || null) }
}
/**
 * Build per-miner timelines from raw RPC results: samples are grouped by
 * miner, sorted by ts, and collapsed into segments; consecutive samples
 * with identical powerMode AND status merge into one segment. An optional
 * containerFilter keeps only miners whose derived container matches.
 */
function processPowerModeTimelineData (results, containerFilter) {
  const byMiner = {}

  for (const entry of iterateRpcEntries(results)) {
    const ts = parseEntryTs(entry.ts || entry.timestamp)
    if (!ts) continue

    const modes = entry[AGGR_FIELDS.POWER_MODE_GROUP] || entry.aggrFields?.[AGGR_FIELDS.POWER_MODE_GROUP] || {}
    const statuses = entry[AGGR_FIELDS.STATUS_GROUP] || entry.aggrFields?.[AGGR_FIELDS.STATUS_GROUP] || {}

    if (typeof modes !== 'object' || modes === null) continue
    for (const [minerId, powerMode] of Object.entries(modes)) {
      const samples = byMiner[minerId] || (byMiner[minerId] = [])
      samples.push({
        ts,
        powerMode: powerMode || 'unknown',
        status: statuses[minerId] || 'unknown'
      })
    }
  }

  const log = []
  for (const [minerId, samples] of Object.entries(byMiner)) {
    const container = extractContainerFromMinerId(minerId)
    if (containerFilter && container !== containerFilter) continue

    samples.sort((a, b) => a.ts - b.ts)

    const segments = []
    let open = null
    for (const sample of samples) {
      if (open && open.powerMode === sample.powerMode && open.status === sample.status) {
        // Same state: extend the open segment.
        open.to = sample.ts
        continue
      }
      if (open) {
        // State changed: the previous segment ends where this sample begins.
        open.to = sample.ts
        segments.push(open)
      }
      open = { from: sample.ts, to: sample.ts, powerMode: sample.powerMode, status: sample.status }
    }
    if (open) segments.push(open)

    log.push({ minerId, container, segments })
  }

  return log
}

// ==================== Temperature ====================

/**
 * GET temperature history: per-interval, per-container max/avg temperatures
 * between `start` and `end` (epoch ms), plus site-wide aggregates and a summary.
 * Optional `container` filters post-RPC.
 */
async function getTemperature (ctx, req) {
  const { start, end } = validateStartEnd(req)

  const config = getIntervalConfig(resolveInterval(start, end, req.query.interval))

  const rpcPayload = {
    key: config.key,
    type: WORKER_TYPES.MINER,
    // Always query the site-wide miner tag; container filtering happens
    // after the RPC in processTemperatureData.
    tag: 't-miner',
    aggrFields: {
      [AGGR_FIELDS.TEMP_MAX]: 1,
      [AGGR_FIELDS.TEMP_AVG]: 1
    },
    shouldCalculateAvg: true,
    limit: Math.ceil((end - start) / config.divisorMs)
  }
  if (config.groupRange) {
    rpcPayload.groupRange = config.groupRange
  }

  const results = await requestRpcEachLimit(ctx, RPC_METHODS.TAIL_LOG, rpcPayload)

  const timePoints = processTemperatureData(results, config.groupRange, req.query.container || null)
  const log = Object.keys(timePoints).sort().map((ts) => ({
    ts: Number(ts),
    ...timePoints[ts]
  }))

  return { log, summary: calculateTemperatureSummary(log) }
}
/**
 * Fold raw RPC results into ts -> temperature points. Each point carries
 * per-container { maxC, avgC } (max is the running maximum, avg a running
 * mean across samples) plus site-wide aggregates: siteMaxC is the highest
 * container max, siteAvgC the mean of container averages. An optional
 * containerFilter keeps only the named container.
 */
function processTemperatureData (results, groupRange, containerFilter) {
  const timePoints = {}
  // Per-ts, per-container sample counts backing the running averages.
  const sampleCounts = {}

  for (const entry of iterateRpcEntries(results)) {
    const rawTs = parseEntryTs(entry.ts || entry.timestamp)
    const ts = groupRange && rawTs ? getStartOfDay(rawTs) : rawTs
    if (!ts) continue

    const maxByContainer = entry[AGGR_FIELDS.TEMP_MAX] || entry.aggrFields?.[AGGR_FIELDS.TEMP_MAX] || {}
    const avgByContainer = entry[AGGR_FIELDS.TEMP_AVG] || entry.aggrFields?.[AGGR_FIELDS.TEMP_AVG] || {}

    if (!timePoints[ts]) {
      timePoints[ts] = { containers: {}, siteMaxC: null, siteAvgC: null }
      sampleCounts[ts] = {}
    }
    const point = timePoints[ts]

    if (typeof maxByContainer === 'object' && maxByContainer !== null) {
      for (const [name, rawMax] of Object.entries(maxByContainer)) {
        if (containerFilter && name !== containerFilter) continue
        const maxC = Number(rawMax) || 0
        const avgC = Number(avgByContainer[name]) || 0

        const existing = point.containers[name]
        if (!existing) {
          point.containers[name] = { maxC, avgC }
          sampleCounts[ts][name] = 1
        } else {
          const seen = sampleCounts[ts][name]
          existing.maxC = Math.max(existing.maxC, maxC)
          // Incremental mean over the samples seen so far.
          existing.avgC = (existing.avgC * seen + avgC) / (seen + 1)
          sampleCounts[ts][name] = seen + 1
        }
      }
    }

    // Refresh site-wide aggregates after folding in this entry.
    const perContainer = Object.values(point.containers)
    if (perContainer.length) {
      point.siteMaxC = Math.max(...perContainer.map((c) => c.maxC))
      point.siteAvgC = safeDiv(perContainer.reduce((sum, c) => sum + c.avgC, 0), perContainer.length)
    }
  }
  return timePoints
}
const containerVals = Object.values(point.containers) + if (containerVals.length) { + point.siteMaxC = Math.max(...containerVals.map(c => c.maxC)) + const avgSum = containerVals.reduce((sum, c) => sum + c.avgC, 0) + point.siteAvgC = safeDiv(avgSum, containerVals.length) + } + } + return timePoints +} + +function calculateTemperatureSummary (log) { + if (!log.length) { + return { + avgMaxTemp: null, + avgAvgTemp: null, + peakTemp: null + } + } + + const maxTemps = log.filter(e => e.siteMaxC !== null).map(e => e.siteMaxC) + const avgTemps = log.filter(e => e.siteAvgC !== null).map(e => e.siteAvgC) + + return { + avgMaxTemp: maxTemps.length ? safeDiv(maxTemps.reduce((a, b) => a + b, 0), maxTemps.length) : null, + avgAvgTemp: avgTemps.length ? safeDiv(avgTemps.reduce((a, b) => a + b, 0), avgTemps.length) : null, + peakTemp: maxTemps.length ? Math.max(...maxTemps) : null + } +} + +module.exports = { + getHashrate, + processHashrateData, + calculateHashrateSummary, + getConsumption, + processConsumptionData, + calculateConsumptionSummary, + getEfficiency, + processEfficiencyData, + calculateEfficiencySummary, + getMinerStatus, + processMinerStatusData, + calculateMinerStatusSummary, + sumObjectValues, + parseEntryTs, + resolveInterval, + getIntervalConfig, + getPowerMode, + processPowerModeData, + calculatePowerModeSummary, + categorizeMiner, + getPowerModeTimeline, + processPowerModeTimelineData, + getTemperature, + processTemperatureData, + calculateTemperatureSummary +} diff --git a/workers/lib/server/index.js b/workers/lib/server/index.js index 840e405..11991cf 100644 --- a/workers/lib/server/index.js +++ b/workers/lib/server/index.js @@ -12,6 +12,7 @@ const financeRoutes = require('./routes/finance.routes') const poolsRoutes = require('./routes/pools.routes') const poolManagerRoutes = require('./routes/poolManager.routes') const siteRoutes = require('./routes/site.routes') +const metricsRoutes = require('./routes/metrics.routes') /** * Collect all routes into a flat 
array for server injection. @@ -30,7 +31,8 @@ function routes (ctx) { ...financeRoutes(ctx), ...poolsRoutes(ctx), ...poolManagerRoutes(ctx), - ...siteRoutes(ctx) + ...siteRoutes(ctx), + ...metricsRoutes(ctx) ] } diff --git a/workers/lib/server/routes/metrics.routes.js b/workers/lib/server/routes/metrics.routes.js new file mode 100644 index 0000000..6104da8 --- /dev/null +++ b/workers/lib/server/routes/metrics.routes.js @@ -0,0 +1,147 @@ +'use strict' + +const { + ENDPOINTS, + HTTP_METHODS +} = require('../../constants') +const { + getHashrate, + getConsumption, + getEfficiency, + getMinerStatus, + getPowerMode, + getPowerModeTimeline, + getTemperature +} = require('../handlers/metrics.handlers') +const { createCachedAuthRoute } = require('../lib/routeHelpers') + +module.exports = (ctx) => { + const schemas = require('../schemas/metrics.schemas.js') + + return [ + { + method: HTTP_METHODS.GET, + url: ENDPOINTS.METRICS_HASHRATE, + schema: { + querystring: schemas.query.hashrate + }, + ...createCachedAuthRoute( + ctx, + (req) => [ + 'metrics/hashrate', + req.query.start, + req.query.end + ], + ENDPOINTS.METRICS_HASHRATE, + getHashrate + ) + }, + { + method: HTTP_METHODS.GET, + url: ENDPOINTS.METRICS_CONSUMPTION, + schema: { + querystring: schemas.query.consumption + }, + ...createCachedAuthRoute( + ctx, + (req) => [ + 'metrics/consumption', + req.query.start, + req.query.end + ], + ENDPOINTS.METRICS_CONSUMPTION, + getConsumption + ) + }, + { + method: HTTP_METHODS.GET, + url: ENDPOINTS.METRICS_EFFICIENCY, + schema: { + querystring: schemas.query.efficiency + }, + ...createCachedAuthRoute( + ctx, + (req) => [ + 'metrics/efficiency', + req.query.start, + req.query.end + ], + ENDPOINTS.METRICS_EFFICIENCY, + getEfficiency + ) + }, + { + method: HTTP_METHODS.GET, + url: ENDPOINTS.METRICS_MINER_STATUS, + schema: { + querystring: schemas.query.minerStatus + }, + ...createCachedAuthRoute( + ctx, + (req) => [ + 'metrics/miner-status', + req.query.start, + req.query.end + ], + 
ENDPOINTS.METRICS_MINER_STATUS, + getMinerStatus + ) + }, + { + method: HTTP_METHODS.GET, + url: ENDPOINTS.METRICS_POWER_MODE, + schema: { + querystring: schemas.query.powerMode + }, + ...createCachedAuthRoute( + ctx, + (req) => [ + 'metrics/power-mode', + req.query.start, + req.query.end, + req.query.interval + ], + ENDPOINTS.METRICS_POWER_MODE, + getPowerMode + ) + }, + { + method: HTTP_METHODS.GET, + url: ENDPOINTS.METRICS_POWER_MODE_TIMELINE, + schema: { + querystring: schemas.query.powerModeTimeline + }, + ...createCachedAuthRoute( + ctx, + (req) => [ + 'metrics/power-mode/timeline', + req.query.start, + req.query.end, + req.query.container, + req.query.limit + ], + ENDPOINTS.METRICS_POWER_MODE_TIMELINE, + getPowerModeTimeline + ) + }, + { + method: HTTP_METHODS.GET, + url: ENDPOINTS.METRICS_TEMPERATURE, + schema: { + querystring: schemas.query.temperature + }, + ...createCachedAuthRoute( + ctx, + (req) => [ + 'metrics/temperature', + req.query.start, + req.query.end, + req.query.interval, + req.query.container + ], + ENDPOINTS.METRICS_TEMPERATURE, + getTemperature + ) + } + ] +} diff --git a/workers/lib/server/schemas/metrics.schemas.js b/workers/lib/server/schemas/metrics.schemas.js new file mode 100644 index 0000000..6141d6d --- /dev/null +++ b/workers/lib/server/schemas/metrics.schemas.js @@ -0,0 +1,75 @@ +'use strict' + +const schemas = { + query: { + hashrate: { + type: 'object', + properties: { + start: { type: 'integer' }, + end: { type: 'integer' }, + overwriteCache: { type: 'boolean' } + }, + required: ['start', 'end'] + }, + consumption: { + type: 'object', + properties: { + start: { type: 'integer' }, + end: { type: 'integer' }, + overwriteCache: { type: 'boolean' } + }, + required: ['start', 'end'] + }, + efficiency: { + type: 'object', + properties: { + start: { type: 'integer' }, + end: { type: 'integer' }, + overwriteCache: { type: 'boolean' } + }, + required: ['start', 'end'] + }, + minerStatus: { + type: 'object', + properties: { + start: { 
type: 'integer' }, + end: { type: 'integer' }, + overwriteCache: { type: 'boolean' } + }, + required: ['start', 'end'] + }, + powerMode: { + type: 'object', + properties: { + start: { type: 'integer' }, + end: { type: 'integer' }, + interval: { type: 'string', enum: ['1h', '1d', '1w'] }, + overwriteCache: { type: 'boolean' } + }, + required: ['start', 'end'] + }, + powerModeTimeline: { + type: 'object', + properties: { + start: { type: 'integer' }, + end: { type: 'integer' }, + container: { type: 'string' }, + limit: { type: 'integer' }, + overwriteCache: { type: 'boolean' } + } + }, + temperature: { + type: 'object', + properties: { + start: { type: 'integer' }, + end: { type: 'integer' }, + interval: { type: 'string', enum: ['1h', '1d', '1w'] }, + container: { type: 'string' }, + overwriteCache: { type: 'boolean' } + }, + required: ['start', 'end'] + } + } +} + +module.exports = schemas