opentelemetry-node-metrics
Comparing version 1.0.1 to 1.0.2
@@ -23,47 +23,51 @@ 'use strict'
+  const lag = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG, {
+    description: 'Lag of event loop in seconds.'
+  }).bind(labels)
   const lagMin = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_MIN, {
     description: 'The minimum recorded event loop delay.'
-  })
+  }).bind(labels)
   const lagMax = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_MAX, {
     description: 'The maximum recorded event loop delay.'
-  })
+  }).bind(labels)
   const lagMean = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_MEAN, {
     description: 'The mean of the recorded event loop delays.'
-  })
+  }).bind(labels)
   const lagStddev = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_STDDEV, {
     description: 'The standard deviation of the recorded event loop delays.'
-  })
+  }).bind(labels)
   const lagP50 = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_P50, {
     description: 'The 50th percentile of the recorded event loop delays.'
-  })
+  }).bind(labels)
   const lagP90 = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_P90, {
     description: 'The 90th percentile of the recorded event loop delays.'
-  })
+  }).bind(labels)
   const lagP99 = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_P99, {
     description: 'The 99th percentile of the recorded event loop delays.'
-  })
+  }).bind(labels)
-  const lag = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG, {
-    description: 'Lag of event loop in seconds.'
-  })
-  function reportEventloopLag (start, observerBatchResult) {
+  function reportEventloopLag (start) {
     const delta = process.hrtime(start)
     const nanosec = (delta[0] * 1e9) + delta[1]
     const seconds = nanosec / 1e9
-    observerBatchResult.observe(labels, [lag.observation(seconds)])
+    lag.update(seconds)
   }
   meter.createBatchObserver((observerBatchResult) => {
-    setImmediate(reportEventloopLag, process.hrtime(), observerBatchResult)
+    setImmediate(reportEventloopLag, process.hrtime())
-    observerBatchResult.observe(labels, [
-      lagMin.observation(histogram.min / 1e9),
-      lagMax.observation(histogram.max / 1e9),
-      lagMean.observation(histogram.mean / 1e9),
-      lagStddev.observation(histogram.stddev / 1e9),
-      lagP50.observation(histogram.percentile(50) / 1e9),
-      lagP90.observation(histogram.percentile(90) / 1e9),
-      lagP99.observation(histogram.percentile(99) / 1e9)
-    ])
+    lagMin.update(histogram.min / 1e9)
+    lagMax.update(histogram.max / 1e9)
+    lagMean.update(histogram.mean / 1e9)
+    lagStddev.update(histogram.stddev / 1e9)
+    lagP50.update(histogram.percentile(50) / 1e9)
+    lagP90.update(histogram.percentile(90) / 1e9)
+    lagP99.update(histogram.percentile(99) / 1e9)
   })
@@ -70,0 +74,0 @@
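What changed above: 1.0.1 built an observation array inside the batch observer callback (observerBatchResult.observe(labels, [...observation(...)])), while 1.0.2 binds each ValueObserver to the label set once and calls update() on the bound instrument. A minimal sketch of the new pattern, assuming the pre-1.0 @opentelemetry/metrics API this package targets and Node's perf_hooks.monitorEventLoopDelay() as the delay histogram; the function and metric names here are illustrative, not the package's exports:

// Sketch only: the bind-once / update() pattern introduced in 1.0.2.
const {monitorEventLoopDelay} = require('perf_hooks')

function setupEventLoopLag (meter, labels) {
  const histogram = monitorEventLoopDelay({resolution: 10}) // resolution in ms
  histogram.enable()

  // Bind the label set once; the observer callback only calls update().
  const lag = meter.createValueObserver('nodejs_eventloop_lag_seconds', {
    description: 'Lag of event loop in seconds.'
  }).bind(labels)
  const lagMean = meter.createValueObserver('nodejs_eventloop_lag_mean_seconds', {
    description: 'The mean of the recorded event loop delays.'
  }).bind(labels)

  function reportEventloopLag (start) {
    const delta = process.hrtime(start)
    const seconds = ((delta[0] * 1e9) + delta[1]) / 1e9
    lag.update(seconds)
  }

  meter.createBatchObserver(() => {
    setImmediate(reportEventloopLag, process.hrtime())
    lagMean.update(histogram.mean / 1e9) // histogram values are nanoseconds
  })
}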
 'use strict'
-const perfHooks = require('perf_hooks')
+const {PerformanceObserver, constants} = require('perf_hooks')
@@ -7,12 +7,6 @@ const NODEJS_GC_DURATION_SECONDS = 'nodejs_gc_duration_seconds'
-const kinds = []
-kinds[perfHooks.constants.NODE_PERFORMANCE_GC_MAJOR] = 'major'
-kinds[perfHooks.constants.NODE_PERFORMANCE_GC_MINOR] = 'minor'
-kinds[perfHooks.constants.NODE_PERFORMANCE_GC_INCREMENTAL] = 'incremental'
-kinds[perfHooks.constants.NODE_PERFORMANCE_GC_WEAKCB] = 'weakcb'
 module.exports = (meter, {prefix, labels, gcDurationBuckets}) => {
   const boundaries = gcDurationBuckets || DEFAULT_GC_DURATION_BUCKETS
-  const gcHistogram = meter.createValueRecorder(prefix + NODEJS_GC_DURATION_SECONDS, {
+  const histogram = meter.createValueRecorder(prefix + NODEJS_GC_DURATION_SECONDS, {
     description: 'Garbage collection duration by kind, one of major, minor, incremental or weakcb.',
@@ -22,9 +16,12 @@ boundaries
-  const obs = new perfHooks.PerformanceObserver(list => {
+  const kinds = {}
+  kinds[constants.NODE_PERFORMANCE_GC_MAJOR] = histogram.bind({...labels, kind: 'major'})
+  kinds[constants.NODE_PERFORMANCE_GC_MINOR] = histogram.bind({...labels, kind: 'minor'})
+  kinds[constants.NODE_PERFORMANCE_GC_INCREMENTAL] = histogram.bind({...labels, kind: 'incremental'}) // eslint-disable-line max-len
+  kinds[constants.NODE_PERFORMANCE_GC_WEAKCB] = histogram.bind({...labels, kind: 'weakcb'})
+  const obs = new PerformanceObserver(list => {
     const entry = list.getEntries()[0]
-    gcHistogram
-      .bind({...labels, kind: kinds[entry.kind]})
-      // Convert duration from milliseconds to seconds
-      .record(entry.duration / 1000)
+    // Convert duration from milliseconds to seconds
+    kinds[entry.kind].record(entry.duration / 1000)
   })
@@ -31,0 +28,0 @@
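What changed above: instead of resolving a kind string and calling histogram.bind() on every GC entry, 1.0.2 pre-binds one recorder per GC kind, so the PerformanceObserver callback is only a lookup plus record(). A sketch of that shape; the observe() registration at the end follows the standard perf_hooks API and is an assumption, not a line from this diff:

// Sketch only: one bound recorder per GC kind, created up front.
const {PerformanceObserver, constants} = require('perf_hooks')

function observeGcDuration (histogram, labels) {
  const kinds = {}
  kinds[constants.NODE_PERFORMANCE_GC_MAJOR] = histogram.bind({...labels, kind: 'major'})
  kinds[constants.NODE_PERFORMANCE_GC_MINOR] = histogram.bind({...labels, kind: 'minor'})
  kinds[constants.NODE_PERFORMANCE_GC_INCREMENTAL] = histogram.bind({...labels, kind: 'incremental'})
  kinds[constants.NODE_PERFORMANCE_GC_WEAKCB] = histogram.bind({...labels, kind: 'weakcb'})

  const obs = new PerformanceObserver(list => {
    const entry = list.getEntries()[0]
    // entry.duration is milliseconds; the metric is exported in seconds
    kinds[entry.kind].record(entry.duration / 1000)
  })
  // Standard perf_hooks registration for GC timing entries (assumption)
  obs.observe({entryTypes: ['gc']})
  return obs
}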
@@ -9,20 +9,18 @@ const safeMemoryUsage = require('./helpers/safeMemoryUsage')
     description: 'Process heap size from Node.js in bytes.'
-  })
+  }).bind(labels)
   const heapSizeUsed = meter.createValueObserver(prefix + NODEJS_HEAP_SIZE_USED, {
     description: 'Process heap size used from Node.js in bytes.'
-  })
+  }).bind(labels)
   const externalMemUsed = meter.createValueObserver(prefix + NODEJS_EXTERNAL_MEMORY, {
     description: 'Node.js external memory size in bytes.'
-  })
+  }).bind(labels)
-  meter.createBatchObserver((observerBatchResult) => {
+  meter.createBatchObserver(() => {
     const memUsage = safeMemoryUsage()
     if (!memUsage) return
-    observerBatchResult.observe(labels, [
-      heapSizeTotal.observation(memUsage.heapTotal),
-      heapSizeUsed.observation(memUsage.heapUsed),
-      memUsage.external !== undefined ? externalMemUsed.observation(memUsage.external) : undefined
-    ])
+    heapSizeTotal.update(memUsage.heapTotal)
+    heapSizeUsed.update(memUsage.heapUsed)
+    if (memUsage.external !== undefined) externalMemUsed.update(memUsage.external)
   })
@@ -29,0 +27,0 @@ }
 function createAggregatorByObjectName () {
   const all = new Map()
-  return function aggregateByObjectName (list) {
+  return function aggregateByObjectName (metric, labels, list) {
     const current = new Map()
     for (const key of all.keys()) current.set(key, 0)
@@ -12,5 +13,7 @@ for (let i = 0; i < list.length; i++) {
-    for (const key of all.keys()) all.set(key, 0)
-    for (const [key, value] of current) all.set(key, value)
-    return all
+    for (const [key, value] of current) {
+      const instrument = all.get(key) || metric.bind({...labels, type: key})
+      instrument.update(value)
+      all.set(key, instrument)
+    }
   }
@@ -17,0 +20,0 @@ }
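What changed above: aggregateByObjectName no longer returns a Map of counts for the caller to observe; it now receives the metric and base labels, keeps one bound instrument per object type in `all`, and calls update() itself, including update(0) for types seen on an earlier run that have since disappeared. A self-contained sketch under that reading; the counting loop is not shown in the hunk and is assumed here to group by constructor name, as the helper's name suggests:

// Sketch only: reworked aggregator with cached bound instruments.
function createAggregatorByObjectName () {
  const all = new Map()
  return function aggregateByObjectName (metric, labels, list) {
    const current = new Map()
    // Start every previously seen type at 0 so types that vanish report 0
    for (const key of all.keys()) current.set(key, 0)
    // Count live objects, grouped by constructor name (assumed grouping key)
    for (let i = 0; i < list.length; i++) {
      const key = list[i].constructor.name
      current.set(key, (current.get(key) || 0) + 1)
    }
    for (const [key, value] of current) {
      // Reuse the cached bound instrument for this type, or bind a new one
      const instrument = all.get(key) || metric.bind({...labels, type: key})
      instrument.update(value)
      all.set(key, instrument)
    }
  }
}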
@@ -6,10 +6,10 @@ const linuxVariant = require('./osMemoryHeapLinux')
 function notLinuxVariant (meter, {prefix, labels}) {
-  meter.createValueObserver(prefix + PROCESS_RESIDENT_MEMORY, {
+  const boundMeter = meter.createValueObserver(prefix + PROCESS_RESIDENT_MEMORY, {
     description: 'Resident memory size in bytes.'
-  }, (observerResult) => {
+  }, () => {
     const memUsage = safeMemoryUsage()
     // I don't think the other things returned from
     // `process.memoryUsage()` is relevant to a standard export
-    if (memUsage) observerResult.observe(memUsage.rss, labels)
-  })
+    if (memUsage) boundMeter.update(memUsage.rss)
+  }).bind(labels)
 }
@@ -16,0 +16,0 @@
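What changed above: the single resident-memory observer is now created as a bound instrument by chaining .bind(labels), and its callback updates the very const it initializes; that is safe because the meter only invokes the callback at collection time, after the assignment has completed. A minimal sketch, with an illustrative metric name and plain process.memoryUsage() standing in for the package's safeMemoryUsage helper:

// Sketch only: callback-style observer bound to a label set, self-referencing
// the bound instrument it is assigned to. `meter` and `labels` are assumed.
const boundRss = meter.createValueObserver('process_resident_memory_bytes', {
  description: 'Resident memory size in bytes.'
}, () => {
  // Runs at collection time, so boundRss is already assigned here
  boundRss.update(process.memoryUsage().rss)
}).bind(labels)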
@@ -34,13 +34,13 @@ 'use strict'
     description: 'Resident memory size in bytes.'
-  })
+  }).bind(labels)
   const virtualMemGauge = meter.createValueObserver(prefix + PROCESS_VIRTUAL_MEMORY, {
     description: 'Virtual memory size in bytes.'
-  })
+  }).bind(labels)
   const heapSizeMemGauge = meter.createValueObserver(prefix + PROCESS_HEAP, {
     description: 'Process heap size in bytes.'
-  })
+  }).bind(labels)
-  meter.createBatchObserver((observerBatchResult) => {
+  meter.createBatchObserver(() => {
     try {
@@ -56,7 +56,5 @@ // Sync I/O is often problematic, but /proc isn't really I/O, it
-      observerBatchResult.observe(labels, [
-        residentMemGauge.observation(structuredOutput.VmRSS),
-        virtualMemGauge.observation(structuredOutput.VmSize),
-        heapSizeMemGauge.observation(structuredOutput.VmData)
-      ])
+      residentMemGauge.update(structuredOutput.VmRSS)
+      virtualMemGauge.update(structuredOutput.VmSize)
+      heapSizeMemGauge.update(structuredOutput.VmData)
     } catch {
@@ -63,0 +61,0 @@ // noop
@@ -11,18 +11,14 @@ const {createAggregatorByObjectName} = require('./helpers/processMetricsHelpers')
   const aggregateByObjectName = createAggregatorByObjectName()
-  meter.createValueObserver(prefix + NODEJS_ACTIVE_HANDLES, {
+  const activeHandlesMetric = meter.createValueObserver(prefix + NODEJS_ACTIVE_HANDLES, {
     description: 'Number of active libuv handles grouped by handle type. Every handle type is C++ class name.' // eslint-disable-line max-len
-  }, (observerResult) => {
-    const handles = process._getActiveHandles()
-    const data = aggregateByObjectName(handles)
-    for (const [key, count] of data.entries()) {
-      observerResult.observe(count, {...labels, type: key})
-    }
+  }, () => {
+    aggregateByObjectName(activeHandlesMetric, labels, process._getActiveHandles())
   })
-  meter.createValueObserver(prefix + NODEJS_ACTIVE_HANDLES_TOTAL, {
+  const boundTotalMetric = meter.createValueObserver(prefix + NODEJS_ACTIVE_HANDLES_TOTAL, {
     description: 'Total number of active handles.'
-  }, (observerResult) => {
+  }, () => {
     const handles = process._getActiveHandles()
-    observerResult.observe(handles.length, labels)
-  })
+    boundTotalMetric.update(handles.length)
+  }).bind(labels)
 }
@@ -29,0 +25,0 @@
@@ -8,5 +8,5 @@ const fs = require('fs')
-  meter.createValueObserver(prefix + PROCESS_OPEN_FDS, {
+  const boundInstrument = meter.createValueObserver(prefix + PROCESS_OPEN_FDS, {
     description: 'Number of open file descriptors.'
-  }, (observerResult) => {
+  }, () => {
     try {
@@ -16,9 +16,9 @@ const fds = fs.readdirSync('/proc/self/fd')
       // it's now closed.
-      observerResult.observe(fds.length - 1, labels)
+      boundInstrument.update(fds.length - 1)
     } catch {
       // noop
     }
-  })
+  }).bind(labels)
 }
 module.exports.metricNames = [PROCESS_OPEN_FDS]
@@ -11,17 +11,13 @@ const {createAggregatorByObjectName} = require('./helpers/processMetricsHelpers')
   const aggregateByObjectName = createAggregatorByObjectName()
-  meter.createValueObserver(prefix + NODEJS_ACTIVE_REQUESTS, {
+  const activeRequestsMetric = meter.createValueObserver(prefix + NODEJS_ACTIVE_REQUESTS, {
     description: 'Number of active libuv requests grouped by request type. Every request type is C++ class name.' // eslint-disable-line max-len
-  }, (observerResult) => {
-    const requests = process._getActiveRequests()
-    const data = aggregateByObjectName(requests)
-    for (const [key, count] of data.entries()) {
-      observerResult.observe(count, {...labels, type: key})
-    }
+  }, () => {
+    aggregateByObjectName(activeRequestsMetric, labels, process._getActiveRequests())
   })
-  meter.createValueObserver(prefix + NODEJS_ACTIVE_REQUESTS_TOTAL, {
+  const boundTotalRequests = meter.createValueObserver(prefix + NODEJS_ACTIVE_REQUESTS_TOTAL, {
     description: 'Total number of active requests.'
-  }, (observerResult) => {
-    observerResult.observe(process._getActiveRequests().length, labels)
-  })
+  }, () => {
+    boundTotalRequests.update(process._getActiveRequests().length)
+  }).bind(labels)
 }
@@ -28,0 +24,0 @@
 {
   "name": "opentelemetry-node-metrics",
-  "version": "1.0.1",
+  "version": "1.0.2",
   "description": "",
@@ -18,2 +18,3 @@ "main": "index.js",
   },
+  "dependencies": {},
   "ci": {
@@ -23,9 +24,9 @@ "isCi": true,
     "service": "drone",
-    "commit": "3adc39c559c00888f9c42b3aeac5dafa4566c941",
-    "build": "3",
+    "commit": "3342ad6aaf00cc693e4e10e144f7f0ba7ef497e4",
+    "build": "5",
     "branch": "master",
     "isPr": false,
     "slug": "marcbachmann/opentelemetry-node-metrics",
-    "date": "2020-12-28T11:46:28.096Z"
+    "date": "2020-12-28T16:47:44.275Z"
   }
 }