diff --git a/common/telemetry.js b/common/telemetry.js
index 35a37526..d6fcfc0b 100644
--- a/common/telemetry.js
+++ b/common/telemetry.js
@@ -25,6 +25,16 @@ const networkInfoWriteClient = influx.getWriteApi(
   's' // precision
 )
 
+// Add new write client for batch metrics
+// NOTE(review): publish/index.js currently records the batch metrics on the
+// existing 'publish' point, not this bucket — wire this up or remove it.
+const batchMetricsWriteClient = influx.getWriteApi(
+  'Filecoin Station', // org
+  'spark-batch-metrics', // bucket
+  'ns' // precision
+)
+
 setInterval(() => {
   publishWriteClient.flush().catch(console.error)
   networkInfoWriteClient.flush().catch(console.error)
+  // Flush the new client on the same cadence as the others so buffered
+  // points are not lost between process restarts
+  batchMetricsWriteClient.flush().catch(console.error)
diff --git a/publish/index.js b/publish/index.js
index 72068d2a..f5469e49 100644
--- a/publish/index.js
+++ b/publish/index.js
@@ -53,6 +53,11 @@ export const publish = async ({
 
   logger.log(`Publishing ${measurements.length} measurements. Total unpublished: ${totalCount}. Batch size: ${maxMeasurements}.`)
 
+  // Calculate batch size in bytes
+  const batchSizeBytes = Buffer.byteLength(
+    measurements.map(m => JSON.stringify(m)).join('\n')
+  )
+
   // Share measurements
   const start = new Date()
   const file = new File(
@@ -126,7 +131,9 @@ export const publish = async ({
 
   logger.log('Done!')
 
+  // Telemetry recording with batch size metrics
   recordTelemetry('publish', point => {
+    // Existing metrics
     point.intField('round_index', roundIndex)
     point.intField('measurements', measurements.length)
     point.floatField('load', totalCount / maxMeasurements)
@@ -135,6 +142,10 @@ export const publish = async ({
       uploadMeasurementsDuration
     )
     point.intField('add_measurements_duration_ms', ieAddMeasurementsDuration)
+
+    // Add batch size metrics to existing publish point
+    point.intField('batch_size_bytes', batchSizeBytes)
+    point.floatField('avg_measurement_size_bytes', measurements.length > 0 ? batchSizeBytes / measurements.length : 0)
   })
 }
 