@instana/core
Comparing version 1.107.0 to 1.108.0
{ | ||
"name": "@instana/core", | ||
"version": "1.107.0", | ||
"version": "1.108.0", | ||
"description": "Core library for Instana's Node.js packages", | ||
@@ -136,3 +136,3 @@ "main": "src/index.js", | ||
}, | ||
"gitHead": "813a22cdd52f0fd1caa2112697892ddabf41ee05" | ||
"gitHead": "7fc0b5e5acc03cbce059778d8b85d4e862b56470" | ||
} |
@@ -105,2 +105,5 @@ 'use strict'; | ||
instrumentationModules[instrumentationKey].init(_config); | ||
if (instrumentationModules[instrumentationKey].batchable && instrumentationModules[instrumentationKey].spanName) { | ||
spanBuffer.addBatchableSpanName(instrumentationModules[instrumentationKey].spanName); | ||
} | ||
}); | ||
@@ -175,2 +178,7 @@ additionalInstrumentationModules.forEach(instrumentationModule => { | ||
// This will be removed again after the opt-in transition phase. | ||
exports.enableSpanBatching = function enableSpanBatching() { | ||
spanBuffer.enableSpanBatching(); | ||
}; | ||
exports._getAndResetTracingMetrics = function _getAndResetTracingMetrics() { | ||
@@ -177,0 +185,0 @@ return { |
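The init loop above picks up two new exports from each instrumentation module, `batchable` and `spanName`, and registers the name with the span buffer so that only span types known to be batchable are ever considered for merging. A minimal sketch of that registration pattern, with stand-in objects instead of the real instrumentation modules (the `httpServer` entry is purely illustrative):

```js
// Sketch only: stand-in span buffer and instrumentation modules, mirroring the
// batchable/spanName registration added in tracing/index.js.
const batchableSpanNames = [];
const spanBuffer = {
  addBatchableSpanName(spanName) {
    if (!batchableSpanNames.includes(spanName)) {
      batchableSpanNames.push(spanName);
    }
  }
};

const instrumentationModules = {
  redis: { spanName: 'redis', batchable: true, init() {} },
  httpServer: { spanName: 'node.http.server', init() {} } // illustrative, not batchable
};

Object.keys(instrumentationModules).forEach(instrumentationKey => {
  const instrumentationModule = instrumentationModules[instrumentationKey];
  instrumentationModule.init({});
  if (instrumentationModule.batchable && instrumentationModule.spanName) {
    spanBuffer.addBatchableSpanName(instrumentationModule.spanName);
  }
});

console.log(batchableSpanNames); // ['redis']
```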
@@ -10,2 +10,5 @@ 'use strict'; | ||
exports.spanName = 'elasticsearch'; | ||
exports.batchable = true; | ||
exports.init = function init() { | ||
@@ -71,3 +74,3 @@ requireHook.onModuleLoad('elasticsearch', instrument); | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('elasticsearch', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(instrumentedAction); | ||
@@ -74,0 +77,0 @@ span.data.elasticsearch = { |
@@ -21,2 +21,5 @@ 'use strict'; | ||
exports.spanName = 'elasticsearch'; | ||
exports.batchable = true; | ||
exports.init = function init() { | ||
@@ -117,3 +120,3 @@ requireHook.onModuleLoad('@elastic/elasticsearch', instrument); | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('elasticsearch', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(instrumentedAction); | ||
@@ -343,3 +346,3 @@ span.data.elasticsearch = { | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('elasticsearch', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(instrumentedRequest, 1); | ||
@@ -346,0 +349,0 @@ span.data.elasticsearch = { |
@@ -12,2 +12,5 @@ 'use strict'; | ||
exports.spanName = 'redis'; | ||
exports.batchable = true; | ||
exports.activate = function activate() { | ||
@@ -43,3 +46,3 @@ isActive = true; | ||
if ( | ||
parentSpan.n === 'redis' && | ||
parentSpan.n === exports.spanName && | ||
(parentSpan.data.redis.command === 'multi' || parentSpan.data.redis.command === 'pipeline') && | ||
@@ -62,3 +65,3 @@ // the multi call is handled in instrumentMultiCommand but since multi is also send to Redis it will also | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('redis', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(wrappedInternalSendCommand); | ||
@@ -122,3 +125,3 @@ span.data.redis = { | ||
cls.ns.enter(clsContextForMultiOrPipeline); | ||
const span = cls.startSpan('redis', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(wrappedInternalMultiOrPipelineCommand); | ||
@@ -125,0 +128,0 @@ span.data.redis = { |
@@ -27,2 +27,5 @@ 'use strict'; | ||
exports.spanName = 'mongo'; | ||
exports.batchable = true; | ||
exports.init = function init() { | ||
@@ -74,3 +77,3 @@ // mongodb >= 3.3.x | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('mongo', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(instrumentedWrite); | ||
@@ -77,0 +80,0 @@ |
@@ -12,2 +12,5 @@ 'use strict'; | ||
exports.spanName = 'mssql'; | ||
exports.batchable = true; | ||
exports.init = function init() { | ||
@@ -61,3 +64,3 @@ requireHook.onModuleLoad('mssql', instrumentMssql); | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('mssql', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(stackTraceRef); | ||
@@ -64,0 +67,0 @@ span.data.mssql = { |
@@ -12,2 +12,5 @@ 'use strict'; | ||
exports.spanName = 'mysql'; | ||
exports.batchable = true; | ||
exports.init = function init() { | ||
@@ -141,3 +144,3 @@ requireHook.onModuleLoad('mysql', instrumentMysql); | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('mysql', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.b = { s: 1 }; | ||
@@ -144,0 +147,0 @@ span.stack = tracingUtil.getStackTrace(instrumentedAccessFunction); |
@@ -12,2 +12,5 @@ 'use strict'; | ||
exports.spanName = 'postgres'; | ||
exports.batchable = true; | ||
exports.init = function init() { | ||
@@ -54,3 +57,3 @@ requireHook.onModuleLoad('pg', instrumentPg); | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('postgres', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(instrumentedQuery); | ||
@@ -57,0 +60,0 @@ span.data.pg = { |
@@ -21,2 +21,5 @@ 'use strict'; | ||
exports.spanName = 'postgres'; | ||
exports.batchable = true; | ||
exports.init = function init() { | ||
@@ -125,3 +128,3 @@ requireHook.onModuleLoad('pg-native', instrumentPgNative); | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('postgres', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(stackTraceRef); | ||
@@ -160,3 +163,3 @@ span.data.pg = { | ||
return cls.ns.runAndReturn(() => { | ||
const span = cls.startSpan('postgres', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
span.stack = tracingUtil.getStackTrace(stackTraceRef); | ||
@@ -163,0 +166,0 @@ span.data.pg = { |
@@ -13,2 +13,5 @@ 'use strict'; | ||
exports.spanName = 'redis'; | ||
exports.batchable = true; | ||
exports.activate = function activate() { | ||
@@ -74,3 +77,3 @@ isActive = true; | ||
const span = cls.startSpan('redis', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
// do not set the redis span as the current span | ||
@@ -130,3 +133,3 @@ cls.setCurrentSpan(parentSpan); | ||
const span = cls.startSpan('redis', constants.EXIT); | ||
const span = cls.startSpan(exports.spanName, constants.EXIT); | ||
// do not set the redis span as the current span | ||
@@ -133,0 +136,0 @@ cls.setCurrentSpan(parentSpan); |
@@ -10,2 +10,3 @@ 'use strict'; | ||
const batchableSpanNames = []; | ||
let downstreamConnection = null; | ||
@@ -15,3 +16,3 @@ let isActive = false; | ||
let minDelayBeforeSendingSpans; | ||
let minDelayBeforeSendingSpans = 1000; | ||
if (process.env.INSTANA_DEV_MIN_DELAY_BEFORE_SENDING_SPANS != null) { | ||
@@ -22,5 +23,4 @@ minDelayBeforeSendingSpans = parseInt(process.env.INSTANA_DEV_MIN_DELAY_BEFORE_SENDING_SPANS, 10); | ||
} | ||
} else { | ||
minDelayBeforeSendingSpans = 1000; | ||
} | ||
let initialDelayBeforeSendingSpans; | ||
@@ -30,8 +30,36 @@ let transmissionDelay; | ||
let forceTransmissionStartingAt; | ||
let transmissionTimeoutHandle; | ||
let spans = []; | ||
let transmissionTimeoutHandle; | ||
let batchThreshold = 10; | ||
let batchingEnabled = false; | ||
if (process.env.INSTANA_DEV_BATCH_THRESHOLD != null) { | ||
batchThreshold = parseInt(process.env.INSTANA_DEV_BATCH_THRESHOLD, 10); | ||
if (isNaN(batchThreshold)) { | ||
batchThreshold = 10; | ||
} | ||
} | ||
const batchBucketWidth = 18; | ||
exports.init = function(config, _downstreamConnection) { | ||
// We keep a map of maps to store spans that can potentially be batched, so we can find partner spans for batching | ||
// more quickly when a new batchable span arrives. (Otherwise we would have to iterate over _all_ spans in the span | ||
// buffer whenever a batchable span is added.) | ||
// | ||
// The first level key is the trace ID, that is, we keep spans from different traces separate. The second key is the | ||
// _end_ timestamp of the span (span.ts + span.d), rounded down to a multiple of 18 (batchBucketWidth). That is, we | ||
// sort batchable spans in buckets that are 18 ms wide. When a new span arrives, we only have to examine two buckets | ||
// (the bucket that the new span would land in and the previous one). Why 18? Because the distance between two spans | ||
// eligible to be merged together can be at most 18 ms (9 ms allowed gap between spans + 9 ms duration of the later | ||
// span). | ||
// | ||
// By only inspecting the current and the previous bucket, we might miss possible batch pairs when a span that ended | ||
// chronologically earlier (span.ts + span.d) is added to the buffer later than its potential partner. To guarantee | ||
// that such pairs are also always found we would have to check the following bucket, too. Since this should be very | ||
// rare, we omit the check, trading better performance for a few missed batch opportunities (if any). | ||
// | ||
// The batchingBuckets are cleared once the span buffer is flushed downstream. | ||
let batchingBuckets = new Map(); | ||
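To make the bucket layout concrete, here is a small worked example of the key computation used by `batchingBucketKey` further down in this diff (the example spans and their timestamps are hypothetical; the 18 ms width and the key formula are taken from the code):

```js
// Worked example of the bucket key; timestamps are made-up small numbers
// instead of real epoch milliseconds.
const batchBucketWidth = 18;

function bucketKey(span) {
  const spanEnd = span.ts + span.d;
  return spanEnd - (spanEnd % batchBucketWidth);
}

const first = { ts: 1000, d: 3 }; // ends at 1003
const second = { ts: 1007, d: 2 }; // ends at 1009, 6 ms after the first span ended

console.log(bucketKey(first)); // 990
console.log(bucketKey(second)); // 1008

// The two spans are clearly close enough to batch, yet they fall into
// neighbouring buckets (990 and 1008), which is why addToBatch below also
// checks the previous bucket (key - batchBucketWidth).
```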
exports.init = function init(config, _downstreamConnection) { | ||
downstreamConnection = _downstreamConnection; | ||
@@ -41,6 +69,7 @@ maxBufferedSpans = config.tracing.maxBufferedSpans; | ||
transmissionDelay = config.tracing.transmissionDelay; | ||
batchingEnabled = config.tracing.spanBatchingEnabled; | ||
initialDelayBeforeSendingSpans = Math.max(transmissionDelay, minDelayBeforeSendingSpans); | ||
}; | ||
exports.activate = function() { | ||
exports.activate = function activate() { | ||
if (!downstreamConnection) { | ||
@@ -65,2 +94,3 @@ logger.error('No downstreamConnection has been set.'); | ||
spans = []; | ||
batchingBuckets.clear(); | ||
transmissionTimeoutHandle = setTimeout(transmitSpans, initialDelayBeforeSendingSpans); | ||
@@ -70,8 +100,19 @@ transmissionTimeoutHandle.unref(); | ||
exports.deactivate = function() { | ||
exports.deactivate = function deactivate() { | ||
isActive = false; | ||
spans = []; | ||
batchingBuckets.clear(); | ||
clearTimeout(transmissionTimeoutHandle); | ||
}; | ||
exports.enableSpanBatching = function enableSpanBatching() { | ||
batchingEnabled = true; | ||
}; | ||
exports.addBatchableSpanName = function(spanName) { | ||
if (!batchableSpanNames.includes(spanName)) { | ||
batchableSpanNames.push(spanName); | ||
} | ||
}; | ||
exports.addSpan = function(span) { | ||
@@ -86,9 +127,193 @@ if (!isActive) { | ||
} | ||
spans.push(span); | ||
if (spans.length >= forceTransmissionStartingAt && Date.now() - minDelayBeforeSendingSpans > activatedAt) { | ||
transmitSpans(); | ||
const spanIsBatchable = batchingEnabled && isBatchable(span); | ||
if (!spanIsBatchable || !addToBatch(span)) { | ||
// add the span to the span buffer; it will be sent downstream with the next transmission | ||
spans.push(span); | ||
if (spanIsBatchable) { | ||
addToBucket(span); | ||
} | ||
if (spans.length >= forceTransmissionStartingAt && Date.now() - minDelayBeforeSendingSpans > activatedAt) { | ||
transmitSpans(); | ||
} | ||
} | ||
}; | ||
function addToBatch(span) { | ||
if (!batchingBuckets.has(span.t)) { | ||
// If we do not yet have any spans for this trace, we cannot batch anything either. | ||
return false; | ||
} | ||
// A potential partner span for batching can be in the bucket this span would land in or in one of the neighbouring | ||
// buckets. Theoretically the spans could come in out of order, but for performance reasons we only support the most | ||
// common case: The span that ended later (according to span.ts + span.d) is also added to the span buffer later. Thus | ||
// we check the span's own bucket and the previous bucket. | ||
const bucketsForTrace = batchingBuckets.get(span.t); | ||
const key = batchingBucketKey(span); | ||
let hasBeenBatched = findBatchPartnerAndMerge(span, bucketsForTrace, key); | ||
if (hasBeenBatched) { | ||
return true; | ||
} | ||
const previousKey = key - batchBucketWidth; | ||
return findBatchPartnerAndMerge(span, bucketsForTrace, previousKey); | ||
} | ||
function findBatchPartnerAndMerge(newSpan, bucketsForTrace, bucketKey) { | ||
const bucket = bucketsForTrace.get(bucketKey); | ||
if (!bucket) { | ||
// We have not seen any spans for that bucket yet. | ||
return false; | ||
} | ||
for (let i = 0; i < bucket.length; i++) { | ||
const bufferedSpan = bucket[i]; | ||
// Note: We do not need to check the span.d < 10 ms condition here because only short spans are added to the buckets | ||
// in the first place. | ||
if ( | ||
// Only merge spans from the same trace, | ||
bufferedSpan.t === newSpan.t && | ||
// with the same parent, | ||
bufferedSpan.p === newSpan.p && | ||
// and the same type, | ||
bufferedSpan.n === newSpan.n && | ||
// with a gap of less than 10 ms in between. | ||
(newSpan.ts >= bufferedSpan.ts | ||
? newSpan.ts < bufferedSpan.ts + bufferedSpan.d + batchThreshold | ||
: bufferedSpan.ts < newSpan.ts + newSpan.d + batchThreshold) | ||
) { | ||
mergeSpansAsBatch(bufferedSpan, newSpan, bucket, bucketKey, i); | ||
return true; | ||
} | ||
} | ||
return false; | ||
} | ||
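A quick numeric check of the merge condition in the loop above, with hypothetical spans and the default threshold of 10 ms used in this diff (`closeEnough` is an invented helper that isolates the ternary gap check from `findBatchPartnerAndMerge`):

```js
const batchThreshold = 10;

function closeEnough(bufferedSpan, newSpan) {
  return newSpan.ts >= bufferedSpan.ts
    ? newSpan.ts < bufferedSpan.ts + bufferedSpan.d + batchThreshold
    : bufferedSpan.ts < newSpan.ts + newSpan.d + batchThreshold;
}

const buffered = { ts: 1000, d: 3 }; // ends at 1003
console.log(closeEnough(buffered, { ts: 1009, d: 2 })); // true (gap of 6 ms)
console.log(closeEnough(buffered, { ts: 1014, d: 2 })); // false (gap of 11 ms)
```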
function mergeSpansAsBatch(oldSpan, newSpan, bucket, bucketKey, indexInBucket) { | ||
// Determine whether the new span (about to be added to the buffer) is more significant than the old span that is already | ||
// in the buffer. Determine significance by: | ||
// 1. span.ec (higher wins) | ||
// 2. duration (higher wins) | ||
// 3. start time (earlier wins) | ||
let mustSwap; | ||
if (newSpan.ec > oldSpan.ec) { | ||
mustSwap = true; | ||
} else if (newSpan.ec === oldSpan.ec && newSpan.d > oldSpan.d) { | ||
mustSwap = true; | ||
} else if (newSpan.ec === oldSpan.ec && newSpan.d === oldSpan.d && newSpan.ts < oldSpan.ts) { | ||
mustSwap = true; | ||
} | ||
if (mustSwap) { | ||
// The new span is more significant, put the new span into the span buffer and merge the old span into it. | ||
const indexInSpanBuffer = spans.indexOf(oldSpan); | ||
if (indexInSpanBuffer >= 0) { | ||
spans[indexInSpanBuffer] = newSpan; | ||
} | ||
bucket[indexInBucket] = newSpan; | ||
mergeIntoTargetSpan(newSpan, oldSpan, bucketKey); | ||
} else { | ||
// The old span is at least as significant as the new span, keep it in the span buffer and merge the | ||
// new span into it. | ||
mergeIntoTargetSpan(oldSpan, newSpan, bucketKey); | ||
} | ||
} | ||
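The swap logic above boils down to a lexicographic comparison. A compact sketch of the same ordering as a standalone predicate (the function name is invented; the ordering of error count, then duration, then earlier start mirrors `mergeSpansAsBatch`):

```js
function newSpanIsMoreSignificant(newSpan, oldSpan) {
  if (newSpan.ec !== oldSpan.ec) {
    return newSpan.ec > oldSpan.ec;
  }
  if (newSpan.d !== oldSpan.d) {
    return newSpan.d > oldSpan.d;
  }
  return newSpan.ts < oldSpan.ts;
}

console.log(newSpanIsMoreSignificant({ ec: 1, d: 2, ts: 1010 }, { ec: 0, d: 9, ts: 1000 })); // true: errors outweigh duration
console.log(newSpanIsMoreSignificant({ ec: 0, d: 2, ts: 1010 }, { ec: 0, d: 9, ts: 1000 })); // false: the longer span wins
```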
/* | ||
* Merges the source span into the target span. Assumes that target is already in the spanBuffer and source can be | ||
* discarded afterwards. | ||
*/ | ||
function mergeIntoTargetSpan(target, source, originalBucketKey) { | ||
target.b = target.b || {}; | ||
// Sum durations into span.b.d (batch duration). If one or both spans already are batched (and have a batch duration), | ||
// prefer that value over the span duration. | ||
if (target.b.d != null && source.b && source.b.d != null) { | ||
target.b.d += source.b.d; | ||
} else if (target.b.d != null) { | ||
target.b.d += source.d; | ||
} else if (source.b && source.b.d != null) { | ||
target.b.d = target.d + source.b.d; | ||
} else { | ||
target.b.d = target.d + source.d; | ||
} | ||
// Calculate latest end timestamp. | ||
const latestEnd = Math.max(target.ts + target.d, source.ts + source.d); | ||
// The batched span starts at the earliest timestamp. | ||
target.ts = Math.min(target.ts, source.ts); | ||
// Set duration of merged span to the distance between earliest start timestamp and latest end timestamp. | ||
target.d = latestEnd - target.ts; | ||
// Sum up error count. | ||
target.ec += source.ec; | ||
setBatchSize(target, source); | ||
// After changing span.ts and span.d we might need to put the span into an additional bucket. | ||
const newBucketKey = batchingBucketKey(target); | ||
if (originalBucketKey !== newBucketKey) { | ||
addToBucket(target, newBucketKey); | ||
} | ||
} | ||
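Worked numbers for the merge arithmetic above, assuming neither span has been batched before (all values are hypothetical):

```js
const target = { ts: 1000, d: 4, ec: 0 }; // ends at 1004
const source = { ts: 1007, d: 3, ec: 1 }; // ends at 1010, 3 ms gap in between

const latestEnd = Math.max(target.ts + target.d, source.ts + source.d); // 1010
const merged = {
  ts: Math.min(target.ts, source.ts),            // 1000
  d: latestEnd - Math.min(target.ts, source.ts), // 10, gap included
  ec: target.ec + source.ec,                     // 1
  b: {
    d: target.d + source.d,                      // 7, gaps excluded
    s: 2
  }
};

console.log(merged);
// { ts: 1000, d: 10, ec: 1, b: { d: 7, s: 2 } }
// Note how span.d covers wall clock time including the gap, while span.b.d
// only sums the durations of the merged calls.
```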
function setBatchSize(target, source) { | ||
if (target.b && target.b.s && source.b && source.b.s) { | ||
// Both spans already have a batch size, add them up. Note: It is rare that source already has batch properties, | ||
// but it can happen, for example because of batching of redis multi calls/batch calls directly in the redis | ||
// instrumentation. | ||
target.b.s += source.b.s; | ||
return; | ||
} else if (target.b && target.b.s) { | ||
// The old span has a batch size but the new one does not; simply increment the batch size by one. | ||
target.b.s += 1; | ||
return; | ||
} | ||
if (source.b && source.b.s) { | ||
// Only the new span has a batch size, so add one for the old span. | ||
target.b.s = source.b.s + 1; | ||
} else { | ||
target.b.s = 2; | ||
} | ||
} | ||
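The four branches above are equivalent to treating a missing batch size as 1 and adding the two sizes. A condensed sketch of that equivalence (the helper name and example values are invented):

```js
function mergedBatchSize(target, source) {
  const targetSize = target.b && target.b.s ? target.b.s : 1;
  const sourceSize = source.b && source.b.s ? source.b.s : 1;
  return targetSize + sourceSize;
}

console.log(mergedBatchSize({ b: { s: 3 } }, { b: { s: 2 } })); // 5, both already batched
console.log(mergedBatchSize({ b: { s: 3 } }, {}));              // 4, only the old span was batched
console.log(mergedBatchSize({}, { b: { s: 2 } }));              // 3, only the new span was batched
console.log(mergedBatchSize({}, {}));                           // 2, two plain spans
```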
function addToBucket(span, preComputedBucketKey) { | ||
// Put batchable spans from the same trace into time-based buckets so we can find them for batching when more | ||
// spans are added later. | ||
const bucketKey = preComputedBucketKey || batchingBucketKey(span); | ||
if (!batchingBuckets.has(span.t)) { | ||
batchingBuckets.set(span.t, new Map()); | ||
} | ||
if (!batchingBuckets.get(span.t).has(bucketKey)) { | ||
batchingBuckets.get(span.t).set(bucketKey, []); | ||
} | ||
batchingBuckets | ||
.get(span.t) | ||
.get(bucketKey) | ||
.push(span); | ||
} | ||
function batchingBucketKey(span) { | ||
const spanEnd = span.ts + span.d; | ||
return spanEnd - (spanEnd % batchBucketWidth); | ||
} | ||
function isBatchable(span) { | ||
return ( | ||
// Only batch spans shorter than 10 ms. | ||
span.d < batchThreshold && | ||
// Only batch spans which have a parent (cannot batch a root span). | ||
span.p && | ||
// Only batch spans which are batchable in principle because it is guaranteed to be a leaf in the trace tree. | ||
batchableSpanNames.includes(span.n) | ||
); | ||
} | ||
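Putting the three rules together, a quick check with hypothetical spans (the name list contains the span names this diff marks as batchable; `node.http.server` appears only as a counterexample):

```js
const batchableSpanNames = ['redis', 'elasticsearch', 'mongo', 'mssql', 'mysql', 'postgres'];
const batchThreshold = 10;

// Boolean() is added here for readable output; the original relies on truthiness.
function isBatchable(span) {
  return Boolean(span.d < batchThreshold && span.p && batchableSpanNames.includes(span.n));
}

console.log(isBatchable({ n: 'redis', p: 'parentId', d: 2 }));             // true
console.log(isBatchable({ n: 'redis', d: 2 }));                            // false: root span, no parent
console.log(isBatchable({ n: 'redis', p: 'parentId', d: 25 }));            // false: too long
console.log(isBatchable({ n: 'node.http.server', p: 'parentId', d: 2 }));  // false: not registered as batchable
```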
function transmitSpans() { | ||
@@ -105,2 +330,6 @@ clearTimeout(transmissionTimeoutHandle); | ||
spans = []; | ||
batchingBuckets.clear(); | ||
// We restore the content of the spans array if sending them downstream was not successful. We do not restore | ||
// batchingBuckets, though. This is deliberate. In the worst case, we might miss some batching opportunities, but | ||
// since sending spans downstream will take a few milliseconds, even that will be rare (and it is acceptable). | ||
@@ -126,2 +355,3 @@ downstreamConnection.sendSpans(spansToSend, function sendSpans(error) { | ||
spans = []; | ||
batchingBuckets.clear(); | ||
return spansToSend; | ||
@@ -128,0 +358,0 @@ }; |
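Taken together, these changes mean that a burst of short, same-typed exit calls under one parent is sent downstream as a single span with batch metadata instead of many individual spans. A hedged sketch of the resulting span shape (field values are illustrative, not taken from a real trace):

```js
const batchedRedisSpan = {
  t: 'traceId',
  p: 'parentSpanId',   // e.g. the HTTP entry span
  n: 'redis',
  ts: 1587994000000,   // earliest start of the merged calls
  d: 12,               // earliest start to latest end, gaps included
  ec: 0,
  b: {
    s: 3,              // three redis calls were merged into this span
    d: 7               // sum of their individual durations
  },
  // span.data is not merged; it stays as reported by the most significant of
  // the merged spans.
  data: { redis: { command: 'get' } }
};
```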
@@ -6,2 +6,7 @@ 'use strict'; | ||
let logger; | ||
logger = require('../logger').getLogger('util/atMostOnce', newLogger => { | ||
logger = newLogger; | ||
}); | ||
// Cache determined main package json as these will be referenced often | ||
@@ -46,2 +51,3 @@ // and identification of these values is expensive. | ||
} catch (e) { | ||
logger.warn('Main package.json file %s cannot be parsed: %s', packageJsonPath, e); | ||
return cb(e, null); | ||
@@ -48,0 +54,0 @@ } |
@@ -29,3 +29,4 @@ /* eslint-disable */ | ||
stackTraceLength: 10, | ||
disabledTracers: [] | ||
disabledTracers: [], | ||
spanBatchingEnabled: false | ||
}, | ||
@@ -98,2 +99,3 @@ secrets: { | ||
normalizeDisabledTracers(config); | ||
normalizeSpanBatchingEnabled(config); | ||
} | ||
@@ -291,2 +293,27 @@ | ||
function normalizeSpanBatchingEnabled(config) { | ||
if (config.tracing.spanBatchingEnabled != null) { | ||
if (typeof config.tracing.spanBatchingEnabled === 'boolean') { | ||
if (config.tracing.spanBatchingEnabled) { | ||
logger.info('Span batching is enabled via config.'); | ||
} | ||
return; | ||
} else { | ||
logger.warn( | ||
`Invalid configuration: config.tracing.spanBatchingEnabled is not a boolean value, will be ignored: ${JSON.stringify( | ||
config.tracing.spanBatchingEnabled | ||
)}` | ||
); | ||
} | ||
} | ||
if (process.env['INSTANA_SPANBATCHING_ENABLED'] === 'true') { | ||
logger.info('Span batching is enabled via environment variable INSTANA_SPANBATCHING_ENABLED.'); | ||
config.tracing.spanBatchingEnabled = true; | ||
return; | ||
} | ||
config.tracing.spanBatchingEnabled = defaults.tracing.spanBatchingEnabled; | ||
} | ||
function normalizeSecrets(config) { | ||
@@ -293,0 +320,0 @@ if (config.secrets == null) { |
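During the opt-in phase, batching therefore stays off unless it is enabled explicitly, either in code or via the environment. A sketch of both options, assuming the usual collector bootstrap (the require call belongs to @instana/collector, not to this package, and is shown only for context):

```js
// Option 1: enable span batching through the in-process configuration object.
require('@instana/collector')({
  tracing: {
    spanBatchingEnabled: true
  }
});

// Option 2: leave the code untouched and set the environment variable instead:
//   INSTANA_SPANBATCHING_ENABLED=true node app.js
//
// Any non-boolean value for config.tracing.spanBatchingEnabled is ignored with
// a warning, and the default remains false.
```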