@opentelemetry/exporter-prometheus
Comparing version 0.27.0 to 0.28.0
/// <reference types="node" />
import { ExportResult } from '@opentelemetry/core';
import { MetricExporter, MetricRecord } from '@opentelemetry/sdk-metrics-base';
import { MetricReader } from '@opentelemetry/sdk-metrics-base';
import { IncomingMessage, ServerResponse } from 'http';
import { ExporterConfig } from './export/types';
export declare class PrometheusExporter implements MetricExporter {
export declare class PrometheusExporter extends MetricReader {
static readonly DEFAULT_OPTIONS: {
@@ -16,2 +15,3 @@ host: undefined;
private readonly _port;
private readonly _baseUrl;
private readonly _endpoint;
@@ -22,3 +22,2 @@ private readonly _server;
private _serializer;
private _batcher;
/**
@@ -30,20 +29,7 @@ * Constructor
constructor(config?: ExporterConfig, callback?: () => void);
onForceFlush(): Promise<void>;
/**
* Saves the current values of all exported {@link MetricRecord}s so that
* they can be pulled by the Prometheus backend.
*
* In its current state, the exporter saves the current values of all metrics
* when export is called and returns them when the export endpoint is called.
* In the future, this should be a no-op and the exporter should reach into
* the metrics when the export endpoint is called. As there is currently no
* interface to do this, this is our only option.
*
* @param records Metrics to be sent to the prometheus backend
* @param cb result callback to be called on finish
*/
export(records: MetricRecord[], cb: (result: ExportResult) => void): void;
/**
* Shuts down the export server and clears the registry
*/
shutdown(): Promise<void>;
onShutdown(): Promise<void>;
/**
@@ -50,0 +36,0 @@ * Stops the Prometheus export server
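The declaration diff above captures the headline change in 0.28.0: `PrometheusExporter` no longer implements the push-style `MetricExporter` interface (`export()`/`shutdown()`) but extends the SDK's pull-based `MetricReader` (`onForceFlush()`/`onShutdown()`). Below is a minimal setup sketch under that model, assuming the experimental `MeterProvider.addMetricReader()` API shipped with `@opentelemetry/sdk-metrics-base` 0.28.0; the meter and counter names are illustrative.

```ts
import { MeterProvider } from '@opentelemetry/sdk-metrics-base';
import { PrometheusExporter } from '@opentelemetry/exporter-prometheus';

// The exporter starts its own HTTP server (default http://localhost:9464/metrics)
// unless `preventServerStart: true` is passed in the config.
const exporter = new PrometheusExporter({}, () => {
  console.log('Prometheus scrape endpoint ready on http://localhost:9464/metrics');
});

// 0.28.0: register the exporter as a MetricReader instead of wiring it into
// a push-based export pipeline.
const meterProvider = new MeterProvider();
meterProvider.addMetricReader(exporter);

// Instruments are recorded as usual; values are pulled when the endpoint is scraped.
const counter = meterProvider
  .getMeter('example-meter')
  .createCounter('http_requests', { description: 'Counted requests' });
counter.add(1, { route: '/' });
```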
@@ -21,7 +21,9 @@ "use strict"; | ||
const core_1 = require("@opentelemetry/core"); | ||
const sdk_metrics_base_1 = require("@opentelemetry/sdk-metrics-base"); | ||
const http_1 = require("http"); | ||
const url = require("url"); | ||
const PrometheusSerializer_1 = require("./PrometheusSerializer"); | ||
const PrometheusAttributesBatcher_1 = require("./PrometheusAttributesBatcher"); | ||
class PrometheusExporter { | ||
/** Node.js v8.x compat */ | ||
const url_1 = require("url"); | ||
const NO_REGISTERED_METRICS = '# no registered metrics'; | ||
class PrometheusExporter extends sdk_metrics_base_1.MetricReader { | ||
// This will be required when histogram is implemented. Leaving here so it is not forgotten | ||
@@ -36,3 +38,3 @@ // Histogram cannot have a attribute named 'le' | ||
constructor(config = {}, callback) { | ||
this._batcher = new PrometheusAttributesBatcher_1.PrometheusAttributesBatcher(); | ||
super(sdk_metrics_base_1.AggregationTemporality.CUMULATIVE); | ||
/** | ||
@@ -46,3 +48,3 @@ * Request handler used by http library to respond to incoming requests | ||
this._requestHandler = (request, response) => { | ||
if (url.parse(request.url).pathname === this._endpoint) { | ||
if (request.url != null && new url_1.URL(request.url, this._baseUrl).pathname === this._endpoint) { | ||
this._exportMetrics(response); | ||
@@ -60,7 +62,15 @@ } | ||
response.setHeader('content-type', 'text/plain'); | ||
if (!this._batcher.hasMetric) { | ||
response.end('# no registered metrics'); | ||
return; | ||
} | ||
response.end(this._serializer.serialize(this._batcher.checkPointSet())); | ||
this.collect() | ||
.then(resourceMetrics => { | ||
let result = NO_REGISTERED_METRICS; | ||
if (resourceMetrics != null) { | ||
result = this._serializer.serialize(resourceMetrics); | ||
} | ||
if (result === '') { | ||
result = NO_REGISTERED_METRICS; | ||
} | ||
response.end(result); | ||
}, err => { | ||
response.end(`# failed to export metrics: ${err}`); | ||
}); | ||
}; | ||
@@ -88,4 +98,5 @@ /** | ||
// unref to prevent prometheus exporter from holding the process open on exit | ||
this._server = http_1.createServer(this._requestHandler).unref(); | ||
this._server = (0, http_1.createServer)(this._requestHandler).unref(); | ||
this._serializer = new PrometheusSerializer_1.PrometheusSerializer(this._prefix, this._appendTimestamp); | ||
this._baseUrl = `http://${this._host}:${this._port}/`; | ||
this._endpoint = (config.endpoint || PrometheusExporter.DEFAULT_OPTIONS.endpoint).replace(/^([^/])/, '/$1'); | ||
@@ -101,27 +112,4 @@ if (config.preventServerStart !== true) { | ||
} | ||
/** | ||
* Saves the current values of all exported {@link MetricRecord}s so that | ||
* they can be pulled by the Prometheus backend. | ||
* | ||
* In its current state, the exporter saves the current values of all metrics | ||
* when export is called and returns them when the export endpoint is called. | ||
* In the future, this should be a no-op and the exporter should reach into | ||
* the metrics when the export endpoint is called. As there is currently no | ||
* interface to do this, this is our only option. | ||
* | ||
* @param records Metrics to be sent to the prometheus backend | ||
* @param cb result callback to be called on finish | ||
*/ | ||
export(records, cb) { | ||
if (!this._server) { | ||
// It is conceivable that the _server may not be started as it is an async startup | ||
// However unlikely, if this happens the caller may retry the export | ||
cb({ code: core_1.ExportResultCode.FAILED }); | ||
return; | ||
} | ||
api_1.diag.debug('Prometheus exporter export'); | ||
for (const record of records) { | ||
this._batcher.process(record); | ||
} | ||
cb({ code: core_1.ExportResultCode.SUCCESS }); | ||
async onForceFlush() { | ||
/** do nothing */ | ||
} | ||
@@ -131,3 +119,3 @@ /** | ||
*/ | ||
shutdown() { | ||
onShutdown() { | ||
return this.stopServer(); | ||
@@ -152,3 +140,3 @@ } | ||
'ERR_SERVER_NOT_RUNNING') { | ||
core_1.globalErrorHandler(err); | ||
(0, core_1.globalErrorHandler)(err); | ||
} | ||
@@ -155,0 +143,0 @@ } |
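In the compiled output above, the request handler no longer reads from a local `PrometheusAttributesBatcher`; it calls the reader's `collect()` and serializes whatever the SDK returns at scrape time. Server startup is unchanged: it happens in the constructor unless `preventServerStart` is set, in which case it can be triggered later. A small sketch of that deferred-start path, assuming the `startServer()` method this package has exposed in previous releases as well:

```ts
import { PrometheusExporter } from '@opentelemetry/exporter-prometheus';

// Defer the HTTP listener, e.g. until the rest of the app has bound its ports.
const exporter = new PrometheusExporter({ preventServerStart: true });

async function startMetricsEndpoint(): Promise<void> {
  // Starts the scrape endpoint on the configured host/port (defaults apply).
  await exporter.startServer();
}

startMetricsEndpoint().catch(err => {
  console.error('failed to start Prometheus scrape endpoint', err);
});
```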
@@ -1,3 +0,2 @@
import { MetricRecord } from '@opentelemetry/sdk-metrics-base';
import { PrometheusCheckpoint } from './types';
import { ResourceMetrics, InstrumentType, InstrumentationLibraryMetrics, MetricData, DataPoint, Histogram } from '@opentelemetry/sdk-metrics-base';
export declare class PrometheusSerializer {
@@ -7,6 +6,8 @@ private _prefix;
constructor(prefix?: string, appendTimestamp?: boolean);
serialize(checkpointSet: PrometheusCheckpoint[]): string;
serializeCheckpointSet(checkpoint: PrometheusCheckpoint): string;
serializeRecord(name: string, record: MetricRecord): string;
serialize(resourceMetrics: ResourceMetrics): string;
serializeInstrumentationLibraryMetrics(instrumentationLibraryMetrics: InstrumentationLibraryMetrics): string;
serializeMetricData(metricData: MetricData): string;
serializeSingularDataPoint(name: string, type: InstrumentType, dataPoint: DataPoint<number>): string;
serializeHistogramDataPoint(name: string, type: InstrumentType, dataPoint: DataPoint<Histogram>): string;
}
//# sourceMappingURL=PrometheusSerializer.d.ts.map
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.PrometheusSerializer = void 0; | ||
/* | ||
@@ -19,2 +17,5 @@ * Copyright The OpenTelemetry Authors | ||
*/ | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.PrometheusSerializer = void 0; | ||
const api_1 = require("@opentelemetry/api"); | ||
const sdk_metrics_base_1 = require("@opentelemetry/sdk-metrics-base"); | ||
@@ -59,8 +60,8 @@ const core_1 = require("@opentelemetry/core"); | ||
* @param name the name of the metric | ||
* @param kind the kind of metric | ||
* @param type the kind of metric | ||
* @returns string | ||
*/ | ||
function enforcePrometheusNamingConvention(name, kind) { | ||
function enforcePrometheusNamingConvention(name, type) { | ||
// Prometheus requires that metrics of the Counter kind have "_total" suffix | ||
if (!name.endsWith('_total') && kind === sdk_metrics_base_1.MetricKind.COUNTER) { | ||
if (!name.endsWith('_total') && type === sdk_metrics_base_1.InstrumentType.COUNTER) { | ||
name = name + '_total'; | ||
@@ -86,14 +87,17 @@ } | ||
} | ||
function toPrometheusType(metricKind, aggregatorKind) { | ||
switch (aggregatorKind) { | ||
case sdk_metrics_base_1.AggregatorKind.SUM: | ||
if (metricKind === sdk_metrics_base_1.MetricKind.COUNTER || | ||
metricKind === sdk_metrics_base_1.MetricKind.OBSERVABLE_COUNTER) { | ||
function toPrometheusType(instrumentType, dataPointType) { | ||
switch (dataPointType) { | ||
case sdk_metrics_base_1.DataPointType.SINGULAR: | ||
if (instrumentType === sdk_metrics_base_1.InstrumentType.COUNTER || | ||
instrumentType === sdk_metrics_base_1.InstrumentType.OBSERVABLE_COUNTER) { | ||
return 'counter'; | ||
} | ||
/** MetricKind.UP_DOWN_COUNTER and MetricKind.OBSERVABLE_UP_DOWN_COUNTER */ | ||
/** | ||
* - HISTOGRAM | ||
* - UP_DOWN_COUNTER | ||
* - OBSERVABLE_GAUGE | ||
* - OBSERVABLE_UP_DOWN_COUNTER | ||
*/ | ||
return 'gauge'; | ||
case sdk_metrics_base_1.AggregatorKind.LAST_VALUE: | ||
return 'gauge'; | ||
case sdk_metrics_base_1.AggregatorKind.HISTOGRAM: | ||
case sdk_metrics_base_1.DataPointType.HISTOGRAM: | ||
return 'histogram'; | ||
@@ -131,66 +135,85 @@ default: | ||
} | ||
serialize(checkpointSet) { | ||
serialize(resourceMetrics) { | ||
let str = ''; | ||
for (const checkpoint of checkpointSet) { | ||
str += this.serializeCheckpointSet(checkpoint) + '\n'; | ||
for (const instrumentationLibraryMetrics of resourceMetrics.instrumentationLibraryMetrics) { | ||
str += this.serializeInstrumentationLibraryMetrics(instrumentationLibraryMetrics); | ||
} | ||
return str; | ||
} | ||
serializeCheckpointSet(checkpoint) { | ||
let name = sanitizePrometheusMetricName(escapeString(checkpoint.descriptor.name)); | ||
serializeInstrumentationLibraryMetrics(instrumentationLibraryMetrics) { | ||
let str = ''; | ||
for (const metric of instrumentationLibraryMetrics.metrics) { | ||
str += this.serializeMetricData(metric) + '\n'; | ||
} | ||
return str; | ||
} | ||
serializeMetricData(metricData) { | ||
let name = sanitizePrometheusMetricName(escapeString(metricData.descriptor.name)); | ||
if (this._prefix) { | ||
name = `${this._prefix}${name}`; | ||
} | ||
name = enforcePrometheusNamingConvention(name, checkpoint.descriptor.metricKind); | ||
const help = `# HELP ${name} ${escapeString(checkpoint.descriptor.description || 'description missing')}`; | ||
const type = `# TYPE ${name} ${toPrometheusType(checkpoint.descriptor.metricKind, checkpoint.aggregatorKind)}`; | ||
const results = checkpoint.records | ||
.map(it => this.serializeRecord(name, it)) | ||
.join(''); | ||
const dataPointType = metricData.dataPointType; | ||
name = enforcePrometheusNamingConvention(name, metricData.descriptor.type); | ||
const help = `# HELP ${name} ${escapeString(metricData.descriptor.description || 'description missing')}`; | ||
const type = `# TYPE ${name} ${toPrometheusType(metricData.descriptor.type, dataPointType)}`; | ||
let results = ''; | ||
switch (dataPointType) { | ||
case sdk_metrics_base_1.DataPointType.SINGULAR: { | ||
results = metricData.dataPoints | ||
.map(it => this.serializeSingularDataPoint(name, metricData.descriptor.type, it)) | ||
.join(''); | ||
break; | ||
} | ||
case sdk_metrics_base_1.DataPointType.HISTOGRAM: { | ||
results = metricData.dataPoints | ||
.map(it => this.serializeHistogramDataPoint(name, metricData.descriptor.type, it)) | ||
.join(''); | ||
break; | ||
} | ||
default: { | ||
api_1.diag.error(`Unrecognizable DataPointType: ${dataPointType} for metric "${name}"`); | ||
} | ||
} | ||
return `${help}\n${type}\n${results}`.trim(); | ||
} | ||
serializeRecord(name, record) { | ||
serializeSingularDataPoint(name, type, dataPoint) { | ||
let results = ''; | ||
name = enforcePrometheusNamingConvention(name, record.descriptor.metricKind); | ||
switch (record.aggregator.kind) { | ||
case sdk_metrics_base_1.AggregatorKind.SUM: | ||
case sdk_metrics_base_1.AggregatorKind.LAST_VALUE: { | ||
const { value, timestamp: hrtime } = record.aggregator.toPoint(); | ||
const timestamp = core_1.hrTimeToMilliseconds(hrtime); | ||
results += stringify(name, record.attributes, value, this._appendTimestamp ? timestamp : undefined, undefined); | ||
name = enforcePrometheusNamingConvention(name, type); | ||
const { value, attributes } = dataPoint; | ||
const timestamp = (0, core_1.hrTimeToMilliseconds)(dataPoint.endTime); | ||
results += stringify(name, attributes, value, this._appendTimestamp ? timestamp : undefined, undefined); | ||
return results; | ||
} | ||
serializeHistogramDataPoint(name, type, dataPoint) { | ||
let results = ''; | ||
name = enforcePrometheusNamingConvention(name, type); | ||
const { value, attributes } = dataPoint; | ||
const timestamp = (0, core_1.hrTimeToMilliseconds)(dataPoint.endTime); | ||
/** Histogram["bucket"] is not typed with `number` */ | ||
for (const key of ['count', 'sum']) { | ||
results += stringify(name + '_' + key, attributes, value[key], this._appendTimestamp ? timestamp : undefined, undefined); | ||
} | ||
let cumulativeSum = 0; | ||
const countEntries = value.buckets.counts.entries(); | ||
let infiniteBoundaryDefined = false; | ||
for (const [idx, val] of countEntries) { | ||
cumulativeSum += val; | ||
const upperBound = value.buckets.boundaries[idx]; | ||
/** HistogramAggregator is producing different boundary output - | ||
* in one case not including infinity values, in other - | ||
* full, e.g. [0, 100] and [0, 100, Infinity] | ||
* we should consider that in export, if Infinity is defined, use it | ||
* as boundary | ||
*/ | ||
if (upperBound === undefined && infiniteBoundaryDefined) { | ||
break; | ||
} | ||
case sdk_metrics_base_1.AggregatorKind.HISTOGRAM: { | ||
const { value, timestamp: hrtime } = record.aggregator.toPoint(); | ||
const timestamp = core_1.hrTimeToMilliseconds(hrtime); | ||
/** Histogram["bucket"] is not typed with `number` */ | ||
for (const key of ['count', 'sum']) { | ||
results += stringify(name + '_' + key, record.attributes, value[key], this._appendTimestamp ? timestamp : undefined, undefined); | ||
} | ||
let cumulativeSum = 0; | ||
const countEntries = value.buckets.counts.entries(); | ||
let infiniteBoundaryDefined = false; | ||
for (const [idx, val] of countEntries) { | ||
cumulativeSum += val; | ||
const upperBound = value.buckets.boundaries[idx]; | ||
/** HistogramAggregator is producing different boundary output - | ||
* in one case not including inifinity values, in other - | ||
* full, e.g. [0, 100] and [0, 100, Infinity] | ||
* we should consider that in export, if Infinity is defined, use it | ||
* as boundary | ||
*/ | ||
if (upperBound === undefined && infiniteBoundaryDefined) { | ||
break; | ||
} | ||
if (upperBound === Infinity) { | ||
infiniteBoundaryDefined = true; | ||
} | ||
results += stringify(name + '_bucket', record.attributes, cumulativeSum, this._appendTimestamp ? timestamp : undefined, { | ||
le: upperBound === undefined || upperBound === Infinity | ||
? '+Inf' | ||
: String(upperBound), | ||
}); | ||
} | ||
break; | ||
if (upperBound === Infinity) { | ||
infiniteBoundaryDefined = true; | ||
} | ||
results += stringify(name + '_bucket', attributes, cumulativeSum, this._appendTimestamp ? timestamp : undefined, { | ||
le: upperBound === undefined || upperBound === Infinity | ||
? '+Inf' | ||
: String(upperBound), | ||
}); | ||
} | ||
@@ -197,0 +220,0 @@ return results; |
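The histogram branch above is the subtle part of the serializer: bucket counts are emitted cumulatively, and whenever the upper boundary is missing or `Infinity` the sample is labelled `le="+Inf"`. Below is a standalone sketch of just that rule (not the package's API; the function name and simplified signature are illustrative), with the exposition-format output it would produce shown in comments.

```ts
// Cumulative-bucket serialization, simplified: no attributes, timestamps,
// or escaping, which the real PrometheusSerializer also handles.
function serializeHistogramSketch(
  name: string,
  boundaries: number[], // e.g. [0, 100]; may be one entry shorter than `counts`
  counts: number[],     // per-bucket counts
  sum: number
): string {
  let out = `${name}_count ${counts.reduce((a, b) => a + b, 0)}\n`;
  out += `${name}_sum ${sum}\n`;
  let cumulative = 0;
  for (let i = 0; i < counts.length; i++) {
    cumulative += counts[i];
    const upperBound = boundaries[i];
    const le =
      upperBound === undefined || upperBound === Infinity
        ? '+Inf'
        : String(upperBound);
    out += `${name}_bucket{le="${le}"} ${cumulative}\n`;
  }
  return out;
}

// serializeHistogramSketch('request_duration', [0, 100], [0, 3, 1], 257) yields:
//   request_duration_count 4
//   request_duration_sum 257
//   request_duration_bucket{le="0"} 0
//   request_duration_bucket{le="100"} 3
//   request_duration_bucket{le="+Inf"} 4
```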
@@ -1,2 +0,2 @@
export declare const VERSION = "0.27.0";
export declare const VERSION = "0.28.0";
//# sourceMappingURL=version.d.ts.map
@@ -20,3 +20,3 @@ "use strict";
// this is autogenerated file, see scripts/version-update.js
exports.VERSION = '0.27.0';
exports.VERSION = '0.28.0';
//# sourceMappingURL=version.js.map
{
"name": "@opentelemetry/exporter-prometheus",
"version": "0.27.0",
"version": "0.28.0",
"description": "OpenTelemetry Exporter Prometheus provides a metrics endpoint for Prometheus",
@@ -9,2 +9,3 @@ "main": "build/src/index.js",
"scripts": {
"prepublishOnly": "npm run compile",
"compile": "tsc --build",
@@ -14,3 +15,3 @@ "clean": "tsc --build --clean",
"tdd": "npm run test -- --watch-extensions ts --watch",
"codecov": "nyc report --reporter=json && codecov -f coverage/*.json -p ../../",
"codecov": "nyc report --reporter=json && codecov -f coverage/*.json -p ../../../",
"lint": "eslint . --ext .ts",
@@ -20,4 +21,5 @@ "lint:fix": "eslint . --ext .ts --fix",
"watch": "tsc --build --watch",
"precompile": "lerna run version --scope $(npm pkg get name) --include-filtered-dependencies",
"prewatch": "npm run precompile"
"precompile": "lerna run version --scope $(npm pkg get name) --include-dependencies",
"prewatch": "npm run precompile",
"peer-api-check": "node ../../../scripts/peer-api-check.js"
},
@@ -33,3 +35,3 @@ "keywords": [
"engines": {
"node": ">=8.0.0"
"node": ">=8.12.0"
},
@@ -48,6 +50,6 @@ "files": [
"devDependencies": {
"@opentelemetry/api": "^1.0.3",
"@opentelemetry/api": "^1.0.0",
"@types/mocha": "8.2.3",
"@types/node": "14.17.11",
"@types/sinon": "10.0.2",
"@types/node": "14.17.33",
"@types/sinon": "10.0.6",
"codecov": "3.8.3",
@@ -59,3 +61,3 @@ "mocha": "7.2.0",
"ts-mocha": "8.0.0",
"typescript": "4.3.5"
"typescript": "4.4.4"
},
@@ -66,7 +68,7 @@ "peerDependencies": {
"dependencies": {
"@opentelemetry/api-metrics": "0.27.0",
"@opentelemetry/core": "1.0.1",
"@opentelemetry/sdk-metrics-base": "0.27.0"
"@opentelemetry/api-metrics": "0.28.0",
"@opentelemetry/core": "1.2.0",
"@opentelemetry/sdk-metrics-base": "0.28.0"
},
"gitHead": "f5e227f0cb829df1ca2dc220a3e0e8ae0e607405"
"gitHead": "28a177ffe3950c5602a91391af582a7a8c813c7d"
}
# OpenTelemetry Prometheus Metric Exporter
[![NPM Published Version][npm-img]][npm-url]
[![dependencies][dependencies-image]][dependencies-url]
[![devDependencies][devDependencies-image]][devDependencies-url]
[![Apache License][license-image]][license-image]
The OpenTelemetry Prometheus Metrics Exporter allows the user to send collected [OpenTelemetry Metrics](https://github.com/open-telemetry/opentelemetry-js/tree/main/packages/opentelemetry-sdk-metrics-base) to Prometheus.
The OpenTelemetry Prometheus Metrics Exporter allows the user to send collected [OpenTelemetry Metrics](https://github.com/open-telemetry/opentelemetry-js/tree/main/experimental/packages/opentelemetry-sdk-metrics-base) to Prometheus.
@@ -64,7 +62,3 @@ [Prometheus](https://prometheus.io/) is a monitoring system that collects metrics, by scraping exposed endpoints at regular intervals, evaluating rule expressions. It can also trigger alerts if certain conditions are met. For assistance setting up Prometheus, [Click here](https://opencensus.io/codelabs/prometheus/#0) for a guided codelab.
[license-image]: https://img.shields.io/badge/license-Apache_2.0-green.svg?style=flat
[dependencies-image]: https://status.david-dm.org/gh/open-telemetry/opentelemetry-js.svg?path=experimental%2Fpackages%2Fopentelemetry-exporter-prometheus
[dependencies-url]: https://david-dm.org/open-telemetry/opentelemetry-js?path=experimental%2Fpackages%2Fopentelemetry-exporter-prometheus
[devDependencies-image]: https://status.david-dm.org/gh/open-telemetry/opentelemetry-js.svg?path=experimental%2Fpackages%2Fopentelemetry-exporter-prometheus&type=dev
[devDependencies-url]: https://david-dm.org/open-telemetry/opentelemetry-js?path=experimental%2Fpackages%2Fopentelemetry-exporter-prometheus&type=dev
[npm-url]: https://www.npmjs.com/package/@opentelemetry/exporter-prometheus
[npm-img]: https://badge.fury.io/js/%40opentelemetry%2Fexporter-prometheus.svg
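For the README's promise of "a metrics endpoint for Prometheus", a quick way to eyeball the output during development is to fetch the endpoint yourself; the URL below assumes the exporter's defaults of host `localhost`, port `9464`, and endpoint `/metrics`. In production Prometheus scrapes this URL on its own schedule, so no such code is needed.

```ts
import * as http from 'http';

// Fetch the exposition-format payload once and print it.
http.get('http://localhost:9464/metrics', res => {
  let body = '';
  res.setEncoding('utf8');
  res.on('data', chunk => (body += chunk));
  res.on('end', () => console.log(body)); // '# HELP ...', '# TYPE ...', samples
});
```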
+ Added @opentelemetry/api@1.1.0 (transitive)
+ Added @opentelemetry/api-metrics@0.28.0 (transitive)
+ Added @opentelemetry/core@1.2.0 (transitive)
+ Added @opentelemetry/resources@1.2.0 (transitive)
+ Added @opentelemetry/sdk-metrics-base@0.28.0 (transitive)
+ Added @opentelemetry/semantic-conventions@1.2.0 (transitive)
- Removed @opentelemetry/api@1.0.4 (transitive)
- Removed @opentelemetry/api-metrics@0.27.0 (transitive)
- Removed @opentelemetry/core@1.0.1 (transitive)
- Removed @opentelemetry/resources@1.0.1 (transitive)
- Removed @opentelemetry/sdk-metrics-base@0.27.0 (transitive)
- Removed @opentelemetry/semantic-conventions@1.0.1 (transitive)
Updated @opentelemetry/core@1.2.0