New Case Study: See how Anthropic automated 95% of dependency reviews with Socket. Learn More
Socket
Sign inDemoInstall
Socket

winston-cloudwatch

Package Overview
Dependencies
Maintainers
1
Versions
77
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

winston-cloudwatch - npm Package Compare versions

Comparing version

to
1.11.3

4

CHANGELOG.md

@@ -0,1 +1,5 @@

### 1.11.3
Fixes https://github.com/lazywithclass/winston-cloudwatch/issues/55
### 1.11.2

@@ -2,0 +6,0 @@

103

lib/cloudwatch-integration.js

@@ -24,52 +24,69 @@ var LIMITS = {

lib.getToken(aws, groupName, streamName, function(err, token) {
lib._postingEvents = true;
safeUpload(function(err) {
lib._postingEvents = false;
return cb(err);
});
if (err) {
debug('error getting token', err, true);
return cb(err);
}
// safeUpload introduced after https://github.com/lazywithclass/winston-cloudwatch/issues/55
// Note that calls to upload() can occur at a greater frequency
// than getToken() responses are processed. By way of example, consider if add() is
// called at 0s and 1.1s, each time with a single event, and upload() is called
// at 1.0s and 2.0s, with the same logEvents array, but calls to getToken()
// take 1.5s to return. When the first call to getToken() DOES return,
// it will send both events and empty the array. Then, when the second call
// to getToken() returns, without this check also here, it would attempt to send
// an empty array, resulting in the InvalidParameterException.
function safeUpload(cb) {
lib.getToken(aws, groupName, streamName, function(err, token) {
var entryIndex = 0;
var bytes = 0;
while (entryIndex < logEvents.length &&
entryIndex <= LIMITS.MAX_BATCH_SIZE_COUNT) {
var ev = logEvents[entryIndex];
// unit tests pass null elements
var evSize = ev ? Buffer.byteLength(ev.message, 'utf8') : 0;
if(evSize > LIMITS.MAX_EVENT_MSG_SIZE_BYTES) {
evSize = LIMITS.MAX_EVENT_MSG_SIZE_BYTES;
ev.message = ev.message.substring(0, evSize);
const msgTooBigErr = new Error('Message Truncated because it exceeds the CloudWatch size limit');
msgTooBigErr.logEvent = ev;
cb(msgTooBigErr);
if (err) {
debug('error getting token', err, true);
return cb(err);
}
if (bytes + evSize > LIMITS.MAX_BATCH_SIZE_BYTES) break;
bytes += evSize;
entryIndex++;
}
var payload = {
logGroupName: groupName,
logStreamName: streamName,
logEvents: logEvents.splice(0, entryIndex)
};
if (token) payload.sequenceToken = token;
var entryIndex = 0;
var bytes = 0;
while (entryIndex < logEvents.length &&
entryIndex <= LIMITS.MAX_BATCH_SIZE_COUNT) {
var ev = logEvents[entryIndex];
// unit tests pass null elements
var evSize = ev ? Buffer.byteLength(ev.message, 'utf8') : 0;
if(evSize > LIMITS.MAX_EVENT_MSG_SIZE_BYTES) {
evSize = LIMITS.MAX_EVENT_MSG_SIZE_BYTES;
ev.message = ev.message.substring(0, evSize);
const msgTooBigErr = new Error('Message Truncated because it exceeds the CloudWatch size limit');
msgTooBigErr.logEvent = ev;
cb(msgTooBigErr);
}
if (bytes + evSize > LIMITS.MAX_BATCH_SIZE_BYTES) break;
bytes += evSize;
entryIndex++;
}
lib._postingEvents = true;
debug('send to aws');
aws.putLogEvents(payload, function(err) {
if (err) {
if (err.code === 'InvalidSequenceTokenException') {
debug('InvalidSequenceTokenException, retrying', true)
lib.submitWithAnotherToken(aws, groupName, streamName, payload, cb)
} else {
debug('error during putLogEvents', err, true)
retrySubmit(aws, payload, 3, cb)
var payload = {
logGroupName: groupName,
logStreamName: streamName,
logEvents: logEvents.splice(0, entryIndex)
};
if (token) payload.sequenceToken = token;
lib._postingEvents = true;
debug('send to aws');
aws.putLogEvents(payload, function(err) {
if (err) {
if (err.code === 'InvalidSequenceTokenException') {
debug('InvalidSequenceTokenException, retrying', true)
lib.submitWithAnotherToken(aws, groupName, streamName, payload, cb)
} else {
debug('error during putLogEvents', err, true)
retrySubmit(aws, payload, 3, cb)
}
} else {
lib._postingEvents = false;
cb()
}
} else {
lib._postingEvents = false;
cb()
}
});
});
});
}
};

@@ -76,0 +93,0 @@

{
"name": "winston-cloudwatch",
"version": "1.11.2",
"version": "1.11.3",
"description": "Send logs to Amazon Cloudwatch using Winston.",

@@ -5,0 +5,0 @@ "keywords": [

@@ -1,2 +0,2 @@

# winston-cloudwatch [v1.11.2](https://github.com/lazywithclass/winston-cloudwatch/blob/master/CHANGELOG.md#1112)
# winston-cloudwatch [v1.11.3](https://github.com/lazywithclass/winston-cloudwatch/blob/master/CHANGELOG.md#1113)

@@ -3,0 +3,0 @@ [![Build Status](https://travis-ci.org/lazywithclass/winston-cloudwatch.svg?branch=master)](https://travis-ci.org/lazywithclass/winston-cloudwatch) [![Coverage Status](https://coveralls.io/repos/github/lazywithclass/winston-cloudwatch/badge.svg?branch=master)](https://coveralls.io/github/lazywithclass/winston-cloudwatch?branch=master) [![Dependency Status](https://david-dm.org/lazywithclass/winston-cloudwatch.svg)](https://david-dm.org/lazywithclass/winston-cloudwatch) [![dev dependencies](https://david-dm.org/lazywithclass/winston-cloudwatch/dev-status.svg)](https://david-dm.org/lazywithclass/winston-cloudwatch#info=devDependencies) [![peer dependencies](https://david-dm.org/lazywithclass/winston-cloudwatch/peer-status.svg)](https://david-dm.org/lazywithclass/winston-cloudwatch#info=peerDependencies)

@@ -26,3 +26,3 @@ describe('cloudwatch-integration', function() {

it('ignores upload calls if already in progress', function(done) {
it('ignores upload calls if putLogEvents already in progress', function(done) {
const events = [{ message : "test message", timestamp : new Date().toISOString()}];

@@ -40,2 +40,15 @@ aws.putLogEvents.onFirstCall().returns(); // Don't call the callback to simulate ongoing request.

it('ignores upload calls if getToken already in progress', function(done) {
const events = [{ message : "test message", timestamp : new Date().toISOString()}];
lib.getToken.onFirstCall().returns(); // Don't call the callback to simulate ongoing token request.
lib.getToken.onSecondCall().yields(null, 'token');
lib.upload(aws, 'group', 'stream', events, function(){});
lib.upload(aws, 'group', 'stream', events, function() {
// The second upload call should get ignored
lib.getToken.calledOnce.should.equal(true);
lib._postingEvents = false; // reset
done()
});
});
it('truncates very large messages and alerts the error handler', function(done) {

@@ -42,0 +55,0 @@ var BIG_MSG_LEN = 300000;