New Case Study:See how Anthropic automated 95% of dependency reviews with Socket.Learn More
Socket
Sign inDemoInstall
Socket

@serverless-guru/logger

Package Overview
Dependencies
Maintainers
0
Versions
11
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@serverless-guru/logger - npm Package Compare versions

Comparing version

to
1.1.0

3

lib/cjs/constants.d.ts

@@ -30,6 +30,7 @@ declare enum MetricUnitList {

}
declare const LOG_LEVELS: readonly ["debug", "info", "warn", "error"];
declare const MAX_PAYLOAD_SIZE = 60000;
declare const COMPRESS_PAYLOAD_SIZE = 25000;
declare const MAX_PAYLOAD_MESSAGE = "Log too large";
export { MetricUnitList, MAX_PAYLOAD_SIZE, COMPRESS_PAYLOAD_SIZE, MAX_PAYLOAD_MESSAGE };
export { MetricUnitList, MAX_PAYLOAD_SIZE, COMPRESS_PAYLOAD_SIZE, MAX_PAYLOAD_MESSAGE, LOG_LEVELS };
//# sourceMappingURL=constants.d.ts.map
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MAX_PAYLOAD_MESSAGE = exports.COMPRESS_PAYLOAD_SIZE = exports.MAX_PAYLOAD_SIZE = exports.MetricUnitList = void 0;
exports.LOG_LEVELS = exports.MAX_PAYLOAD_MESSAGE = exports.COMPRESS_PAYLOAD_SIZE = exports.MAX_PAYLOAD_SIZE = exports.MetricUnitList = void 0;
var MetricUnitList;

@@ -34,2 +34,4 @@ (function (MetricUnitList) {

})(MetricUnitList || (exports.MetricUnitList = MetricUnitList = {}));
const LOG_LEVELS = ["debug", "info", "warn", "error"];
exports.LOG_LEVELS = LOG_LEVELS;
const MAX_PAYLOAD_SIZE = 60000;

@@ -36,0 +38,0 @@ exports.MAX_PAYLOAD_SIZE = MAX_PAYLOAD_SIZE;

@@ -12,2 +12,3 @@ import { MetricUnitList } from "./constants.js";

constructor(serviceName: string, applicationName: string, correlationId?: string | null);
getLogLevel(level: Level): number;
log(level: Level, message?: string, payload?: JSONValue | Error, context?: JSONObject, sensitiveAttributes?: StringArray): void;

@@ -14,0 +15,0 @@ info(message?: string, payload?: JSONValue, context?: JSONObject, sensitiveAttributes?: StringArray): void;

"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });

@@ -8,9 +11,14 @@ exports.Logger = void 0;

const constants_js_1 = require("./constants.js");
const NO_LOG_EVENT = process.env.SG_LOGGER_LOG_EVENT?.toLowerCase() === "false";
const SKIP_MASK = process.env.SG_LOGGER_MASK?.toLowerCase() === "false";
const MAX_SIZE = parseInt(process.env.SG_LOGGER_MAX_SIZE || `${constants_js_1.MAX_PAYLOAD_SIZE}`) || constants_js_1.MAX_PAYLOAD_SIZE;
const COMPRESS_SIZE = parseInt(process.env.SG_LOGGER_COMPRESS_SIZE || `${constants_js_1.COMPRESS_PAYLOAD_SIZE}`) || constants_js_1.COMPRESS_PAYLOAD_SIZE;
const NO_COMPRESS = process.env.SG_LOGGER_NO_COMPRESS?.toLowerCase() === "true";
const NO_SKIP = process.env.SG_LOGGER_NO_SKIP?.toLowerCase() === "true";
const LOG_TS = process.env.SG_LOGGER_LOG_TS?.toLowerCase() === "true";
const env_var_1 = __importDefault(require("env-var"));
const LOG_EVENT = env_var_1.default.get("SG_LOGGER_LOG_EVENT").default("true").asBool();
const MASK_SECRETS = env_var_1.default.get("SG_LOGGER_MASK").default("true").asBool();
const MAX_SIZE = env_var_1.default.get("SG_LOGGER_MAX_SIZE").default(constants_js_1.MAX_PAYLOAD_SIZE).asInt();
const COMPRESS_SIZE = env_var_1.default.get("SG_LOGGER_COMPRESS_SIZE").default(constants_js_1.COMPRESS_PAYLOAD_SIZE).asInt();
const NO_COMPRESS = env_var_1.default.get("SG_LOGGER_NO_COMPRESS").default("false").asBool();
const NO_SKIP = env_var_1.default.get("SG_LOGGER_NO_SKIP").default("false").asBool();
const LOG_TS = env_var_1.default.get("SG_LOGGER_LOG_TS").default("false").asBool();
const LOG_LEVEL = env_var_1.default
.get("SG_LOGGER_LOG_LEVEL")
.default(env_var_1.default.get("AWS_LAMBDA_LOG_LEVEL").default("warn").asString().toLowerCase())
.asEnum(constants_js_1.LOG_LEVELS);
class Logger {

@@ -31,5 +39,19 @@ static METRIC_UNITS = constants_js_1.MetricUnitList;

this.console =
process.env.AWS_LAMBDA_LOG_FORMAT === "JSON" ? new node_console_1.Console((process.stdout, process.stderr)) : console;
env_var_1.default.get("AWS_LAMBDA_LOG_FORMAT").asString() === "JSON"
? new node_console_1.Console((process.stdout, process.stderr))
: console;
}
getLogLevel(level) {
const logLevels = {
debug: 0,
info: 1,
warn: 2,
error: 3,
};
return logLevels[level] || -1;
}
log(level, message = "", payload = {}, context = {}, sensitiveAttributes = []) {
if (this.getLogLevel(level) < this.getLogLevel(LOG_LEVEL)) {
return;
}
try {

@@ -63,3 +85,3 @@ // Default sensitive attributes

}
if (SKIP_MASK === true) {
if (MASK_SECRETS === false) {
return value;

@@ -201,3 +223,3 @@ }

logInputEvent(event) {
if (!NO_LOG_EVENT) {
if (LOG_EVENT) {
this.info("Input Event", event, {});

@@ -204,0 +226,0 @@ }

@@ -1,3 +0,3 @@

import type { MetricUnitList } from "../constants.js";
type Level = "info" | "debug" | "warn" | "error";
import type { LOG_LEVELS, MetricUnitList } from "../constants.js";
type Level = (typeof LOG_LEVELS)[number];
type StringArray = Array<string>;

@@ -4,0 +4,0 @@ type MetricUnit = (typeof MetricUnitList)[keyof typeof MetricUnitList];

@@ -30,6 +30,7 @@ declare enum MetricUnitList {

}
declare const LOG_LEVELS: readonly ["debug", "info", "warn", "error"];
declare const MAX_PAYLOAD_SIZE = 60000;
declare const COMPRESS_PAYLOAD_SIZE = 25000;
declare const MAX_PAYLOAD_MESSAGE = "Log too large";
export { MetricUnitList, MAX_PAYLOAD_SIZE, COMPRESS_PAYLOAD_SIZE, MAX_PAYLOAD_MESSAGE };
export { MetricUnitList, MAX_PAYLOAD_SIZE, COMPRESS_PAYLOAD_SIZE, MAX_PAYLOAD_MESSAGE, LOG_LEVELS };
//# sourceMappingURL=constants.d.ts.map

@@ -31,5 +31,6 @@ var MetricUnitList;

})(MetricUnitList || (MetricUnitList = {}));
const LOG_LEVELS = ["debug", "info", "warn", "error"];
const MAX_PAYLOAD_SIZE = 60000;
const COMPRESS_PAYLOAD_SIZE = 25000;
const MAX_PAYLOAD_MESSAGE = "Log too large";
export { MetricUnitList, MAX_PAYLOAD_SIZE, COMPRESS_PAYLOAD_SIZE, MAX_PAYLOAD_MESSAGE };
export { MetricUnitList, MAX_PAYLOAD_SIZE, COMPRESS_PAYLOAD_SIZE, MAX_PAYLOAD_MESSAGE, LOG_LEVELS };

@@ -12,2 +12,3 @@ import { MetricUnitList } from "./constants.js";

constructor(serviceName: string, applicationName: string, correlationId?: string | null);
getLogLevel(level: Level): number;
log(level: Level, message?: string, payload?: JSONValue | Error, context?: JSONObject, sensitiveAttributes?: StringArray): void;

@@ -14,0 +15,0 @@ info(message?: string, payload?: JSONValue, context?: JSONObject, sensitiveAttributes?: StringArray): void;

import { randomUUID } from "node:crypto";
import { gzipSync } from "node:zlib";
import { Console } from "node:console";
import { MetricUnitList, MAX_PAYLOAD_SIZE, COMPRESS_PAYLOAD_SIZE, MAX_PAYLOAD_MESSAGE } from "./constants.js";
const NO_LOG_EVENT = process.env.SG_LOGGER_LOG_EVENT?.toLowerCase() === "false";
const SKIP_MASK = process.env.SG_LOGGER_MASK?.toLowerCase() === "false";
const MAX_SIZE = parseInt(process.env.SG_LOGGER_MAX_SIZE || `${MAX_PAYLOAD_SIZE}`) || MAX_PAYLOAD_SIZE;
const COMPRESS_SIZE = parseInt(process.env.SG_LOGGER_COMPRESS_SIZE || `${COMPRESS_PAYLOAD_SIZE}`) || COMPRESS_PAYLOAD_SIZE;
const NO_COMPRESS = process.env.SG_LOGGER_NO_COMPRESS?.toLowerCase() === "true";
const NO_SKIP = process.env.SG_LOGGER_NO_SKIP?.toLowerCase() === "true";
const LOG_TS = process.env.SG_LOGGER_LOG_TS?.toLowerCase() === "true";
import { MetricUnitList, MAX_PAYLOAD_SIZE, COMPRESS_PAYLOAD_SIZE, MAX_PAYLOAD_MESSAGE, LOG_LEVELS, } from "./constants.js";
import env from "env-var";
const LOG_EVENT = env.get("SG_LOGGER_LOG_EVENT").default("true").asBool();
const MASK_SECRETS = env.get("SG_LOGGER_MASK").default("true").asBool();
const MAX_SIZE = env.get("SG_LOGGER_MAX_SIZE").default(MAX_PAYLOAD_SIZE).asInt();
const COMPRESS_SIZE = env.get("SG_LOGGER_COMPRESS_SIZE").default(COMPRESS_PAYLOAD_SIZE).asInt();
const NO_COMPRESS = env.get("SG_LOGGER_NO_COMPRESS").default("false").asBool();
const NO_SKIP = env.get("SG_LOGGER_NO_SKIP").default("false").asBool();
const LOG_TS = env.get("SG_LOGGER_LOG_TS").default("false").asBool();
const LOG_LEVEL = env
.get("SG_LOGGER_LOG_LEVEL")
.default(env.get("AWS_LAMBDA_LOG_LEVEL").default("warn").asString().toLowerCase())
.asEnum(LOG_LEVELS);
class Logger {

@@ -27,5 +32,19 @@ static METRIC_UNITS = MetricUnitList;

this.console =
process.env.AWS_LAMBDA_LOG_FORMAT === "JSON" ? new Console((process.stdout, process.stderr)) : console;
env.get("AWS_LAMBDA_LOG_FORMAT").asString() === "JSON"
? new Console((process.stdout, process.stderr))
: console;
}
getLogLevel(level) {
const logLevels = {
debug: 0,
info: 1,
warn: 2,
error: 3,
};
return logLevels[level] || -1;
}
log(level, message = "", payload = {}, context = {}, sensitiveAttributes = []) {
if (this.getLogLevel(level) < this.getLogLevel(LOG_LEVEL)) {
return;
}
try {

@@ -59,3 +78,3 @@ // Default sensitive attributes

}
if (SKIP_MASK === true) {
if (MASK_SECRETS === false) {
return value;

@@ -197,3 +216,3 @@ }

logInputEvent(event) {
if (!NO_LOG_EVENT) {
if (LOG_EVENT) {
this.info("Input Event", event, {});

@@ -200,0 +219,0 @@ }

@@ -1,3 +0,3 @@

import type { MetricUnitList } from "../constants.js";
type Level = "info" | "debug" | "warn" | "error";
import type { LOG_LEVELS, MetricUnitList } from "../constants.js";
type Level = (typeof LOG_LEVELS)[number];
type StringArray = Array<string>;

@@ -4,0 +4,0 @@ type MetricUnit = (typeof MetricUnitList)[keyof typeof MetricUnitList];

{
"name": "@serverless-guru/logger",
"version": "1.0.8",
"version": "1.1.0",
"description": "Common logger utility",

@@ -69,3 +69,6 @@ "main": "./lib/cjs/index.js",

"access": "public"
},
"dependencies": {
"env-var": "^7.5.0"
}
}

@@ -6,37 +6,41 @@ # Logger

## Key features
* Small footprint
* Enforces structured and consistent logs across all your Lambda functions
* Automatically masks sensible values
* Automatically compresses large payloads (>25Kb)
* Automatically ignores very large payloads (>60Kb)
* Supports CloudwatchLogs text and JSON format
- Small footprint
- Enforces structured and consistent logs across all your Lambda functions
- Automatically masks sensitive values
- Automatically compresses large payloads (>25Kb)
- Automatically ignores very large payloads (>60Kb)
- Supports CloudwatchLogs text and JSON format
### Environment variables
* SG_LOGGER_LOG_EVENT: Log event, _default: true_
* SG_LOGGER_SKIP_MASK: Skip masking of sensible values, _default: false_
* SG_LOGGER_MAX_SIZE: Skip logging payload bigger than size (in bytes), _default: 60000_
* SG_LOGGER_NO_SKIP: Don't skip payloads bigger than *SG_LOGGER_MAX_SIZE*, _default: false_
* SG_LOGGER_COMPRESS_SIZE: Compress (gzip) payload bigger than size (in bytes), _default: 25000_
* SG_LOGGER_NO_COMPRESS: Don't compress logs bigger than *SG_LOGGER_COMPRESS_SIZE*, _default: false_
* SG_LOGGER_LOG_TS: Add timestamp (in ms) to the output object (useful when not using Cloudwatch Logs), _default: false_
- `SG_LOGGER_LOG_EVENT`: Log event, _default: true_
- `SG_LOGGER_SKIP_MASK`: Skip masking of sensitive values, _default: false_
- `SG_LOGGER_MAX_SIZE`: Skip logging payload bigger than size (in bytes), _default: 60000_
- `SG_LOGGER_NO_SKIP`: Don't skip payloads bigger than _SG_LOGGER_MAX_SIZE_, _default: false_
- `SG_LOGGER_COMPRESS_SIZE`: Compress (gzip) payload bigger than size (in bytes), _default: 25000_
- `SG_LOGGER_NO_COMPRESS`: Don't compress logs bigger than `SG_LOGGER_COMPRESS_SIZE`, _default: false_
- `SG_LOGGER_LOG_TS`: Add timestamp (in ms) to the output object (useful when not using Cloudwatch Logs), _default: false_
- `SG_LOGGER_LOG_LEVEL`: The minimum level to log. One of `debug`, `info`, `warn` or `error`, _default: same as `AWS_LAMBDA_LOG_LEVEL` or `warn`_
## Log schema
```json
{
"timestamp": 1729066777619,
"service": "myService",
"level": "INFO",
"correlationId": "092f5cf0-d1c8-4a71-a8a0-3c86aeb1c212",
"message": "my message",
"context": {
"handlerNamespace": "multiply",
"factor": 2
},
"payload": {
"key1": "value1",
"key2": 3,
"key3": {
"key31": "value31"
"timestamp": 1729066777619,
"service": "myService",
"level": "INFO",
"correlationId": "092f5cf0-d1c8-4a71-a8a0-3c86aeb1c212",
"message": "my message",
"context": {
"handlerNamespace": "multiply",
"factor": 2
},
"payload": {
"key1": "value1",
"key2": 3,
"key3": {
"key31": "value31"
}
}
}
}

@@ -46,3 +50,5 @@ ```

### Error schema
#### Without an Error object
`logger.error('global error', {key1: 'value1'})`

@@ -52,14 +58,14 @@

{
"timestamp": 1729066777619,
"service": "myService",
"level": "ERROR",
"correlationId": "3bfd61c4-8934-4ae9-b646-d57144094986",
"message": "invalid factor",
"context": {
"handlerNamespace": "multiply",
"factor": 2
},
"payload": {
"key1": "value1"
}
"timestamp": 1729066777619,
"service": "myService",
"level": "ERROR",
"correlationId": "3bfd61c4-8934-4ae9-b646-d57144094986",
"message": "invalid factor",
"context": {
"handlerNamespace": "multiply",
"factor": 2
},
"payload": {
"key1": "value1"
}
}

@@ -69,10 +75,14 @@ ```

#### With an Error object
```javascript
logger.error('global error', new RangeError('invalid factor', {
cause: {
factor: event.factor,
limit: 10,
reason: 'too big'
}
}))
logger.error(
"global error",
new RangeError("invalid factor", {
cause: {
factor: event.factor,
limit: 10,
reason: "too big",
},
})
);
```

@@ -82,24 +92,25 @@

{
"timestamp": 1729066777619,
"service": "myService",
"level": "ERROR",
"correlationId": "3bfd61c4-8934-4ae9-b646-d57144094986",
"message": "global error",
"context": {
"handlerNamespace": "multiply",
"factor": 2
},
"error": {
"name": "RangeError",
"location": "/path/to/file.js:341",
"message": "invalid factor",
"stack": "RangeError: invalid factor\n at main (/path/to/file.js:341:15)\n at /path/to/file2.js:953:30\n at new Promise (<anonymous>)\n at AwsInvokeLocal.invokeLocalNodeJs (/path/to/file3.js:906:12)\n at process.processTicksAndRejections (node:internal/process/task_queues:95:5)",
"cause": {
"factor": 12,
"limit": 10,
"reason": "too big"
}
}
"timestamp": 1729066777619,
"service": "myService",
"level": "ERROR",
"correlationId": "3bfd61c4-8934-4ae9-b646-d57144094986",
"message": "global error",
"context": {
"handlerNamespace": "multiply",
"factor": 2
},
"error": {
"name": "RangeError",
"location": "/path/to/file.js:341",
"message": "invalid factor",
"stack": "RangeError: invalid factor\n at main (/path/to/file.js:341:15)\n at /path/to/file2.js:953:30\n at new Promise (<anonymous>)\n at AwsInvokeLocal.invokeLocalNodeJs (/path/to/file3.js:906:12)\n at process.processTicksAndRejections (node:internal/process/task_queues:95:5)",
"cause": {
"factor": 12,
"limit": 10,
"reason": "too big"
}
}
}
```
## Installation

@@ -112,5 +123,7 @@

## Usage
The `Logger` instance can be re-used across modules, allowing you to keep globally defined context keys.
**helpers/logger.js**
```javascript

@@ -122,64 +135,67 @@ const { Logger } = require("@serverless-guru/logger");

```
**helpers/math.js**
```javascript
const { logger } = require('./logger')
const { logger } = require("./logger");
export const multiply = async (n, factor) => {
const sleepMs = Math.floor(Math.random() * 1000 * factor)
const sleepMs = Math.floor(Math.random() * 1000 * factor);
await delay(sleepMs)
await delay(sleepMs);
const result = n * factor
const result = n * factor;
logger.debug('Multiply', { n, duration: sleepMs, result })
logger.debug("Multiply", { n, duration: sleepMs, result });
return result
}
return result;
};
const delay = (ms) => {
return new Promise((resolve) => setTimeout(resolve, ms))
}
return new Promise((resolve) => setTimeout(resolve, ms));
};
```
**handlers/multiply.js**
```javascript
const { logger, metricUnits } = require("../helpers/logger.js");
const LOG_FORMAT = process.env.AWS_LAMBDA_LOG_FORMAT || 'Text'
const LOG_FORMAT = process.env.AWS_LAMBDA_LOG_FORMAT || "Text";
const main = async (event, context) => {
try {
logger.setCorrelationId(context.awsRequestId)
logger.addContextKey({
handlerNamespace: 'multiply',
logFormat: LOG_FORMAT,
})
logger.logInputEvent({ event });
try {
logger.setCorrelationId(context.awsRequestId);
logger.addContextKey({
handlerNamespace: "multiply",
logFormat: LOG_FORMAT,
});
logger.logInputEvent({ event });
if (event.factor) {
logger.addContextKey({ factor: event.factor })
if (event.factor > 10) {
const cause = { factor: event.factor, limit: 10, reason: 'too big' }
logger.error('invalid factor', cause)
throw new RangeError('invalid factor', { cause })
}
}
if (event.factor) {
logger.addContextKey({ factor: event.factor });
if (event.factor > 10) {
const cause = { factor: event.factor, limit: 10, reason: "too big" };
logger.error("invalid factor", cause);
throw new RangeError("invalid factor", { cause });
}
}
const start = new Date().getTime()
const promises = [1, 2, 3, 4, 5].map((n) => multiply(n, event.factor || 1))
const result = await Promise.all(promises)
const end = new Date().getTime()
const start = new Date().getTime();
const promises = [1, 2, 3, 4, 5].map((n) => multiply(n, event.factor || 1));
const result = await Promise.all(promises);
const end = new Date().getTime();
logger.info('Result', { result }, {}, ['factor'])
logger.info("Result", { result }, {}, ["factor"]);
logger.metric('multiply', {
name: 'Duration',
unit: metricUnits.Milliseconds,
value: end - start,
dimensions: [['LOG_FORMAT', LOG_FORMAT]],
})
} catch(error) {
logger.error('global error', error)
} finally {
logger.clearLogContext()
}
logger.metric("multiply", {
name: "Duration",
unit: metricUnits.Milliseconds,
value: end - start,
dimensions: [["LOG_FORMAT", LOG_FORMAT]],
});
} catch (error) {
logger.error("global error", error);
} finally {
logger.clearLogContext();
}
};

@@ -189,3 +205,5 @@

```
## The importance of CorrelationId
Why define a _correlationId_ when we already have a _requestId_ provided by AWS?

@@ -197,9 +215,10 @@

* The Web client generates a correlationId and passes it in the payload to API Gateway (API Gateway Logs are set to log Payloads in JSON)
* The first Lambda uses the `setCorrelationId` method to assign the `correlationId` from the payload to all log outputs
* The `correlationId` is part of the payload sent to SQS
* The second Lambda uses the `setCorrelationId` method to assign the `correlationId` from the SQS event to all log outputs
* The `correlationId` is added to the invocation payload of the remote API.
- The Web client generates a correlationId and passes it in the payload to API Gateway (API Gateway Logs are set to log Payloads in JSON)
- The first Lambda uses the `setCorrelationId` method to assign the `correlationId` from the payload to all log outputs
- The `correlationId` is part of the payload sent to SQS
- The second Lambda uses the `setCorrelationId` method to assign the `correlationId` from the SQS event to all log outputs
- The `correlationId` is added to the invocation payload of the remote API.
Using CloudWatchLogs Insights, it is now possible to query both Lambda LogGroups and the API Gateway LogGroup simultaneously with a single simple query:
```

@@ -210,143 +229,198 @@ fields @timestamp, @message

```
To get the logs of all events for the specific `correlationId` across multiple services.
## Class methods
### Constructor
```javascript
const logger = new Logger(serviceName,applicationName,correlationId)
const logger = new Logger(serviceName, applicationName, correlationId);
```
* __serviceName__ [string, mandatory]: Added to each log output
* __applicationName__ [string, mandatory]: Defines the Namespace for metrics
* __correlationId__ [string, optional]: A new UUIDv4 is generated when not defined. Added to each log output.
- **serviceName** [string, mandatory]: Added to each log output
- **applicationName** [string, mandatory]: Defines the Namespace for metrics
- **correlationId** [string, optional]: A new UUIDv4 is generated when not defined. Added to each log output.
### setCorrelationId
Set a correlationId used across all log statements. Useful when the _correlationId_ is received as payload to the Lambda function.
```javascript
logger.setCorrelationId(correlationId)
logger.setCorrelationId(correlationId);
```
* __correlationId__ [string, mandatory]
- **correlationId** [string, mandatory]
### getCorrelationId
Retrieves the current _correlationId_. Useful when the correlationId needs to be passed to API calls or other service integrations.
```javascript
const correlationId = logger.getCorrelationId()
const correlationId = logger.getCorrelationId();
```
* __correlationId__ [string, mandatory]
- **correlationId** [string, mandatory]
### logInputEvent
Logs the object passed as argument when the environment variable _LOG\_EVENT_ is set to _"true"_. Generally used to conditionally log the incoming event, but it can be used for any other payload too.
Logs the object passed as argument when the environment variable _LOG_EVENT_ is set to _"true"_. Generally used to conditionally log the incoming event, but it can be used for any other payload too.
The _message_ key will always be `Input Event`.
```javascript
logger.logInputEvent(payload)
logger.logInputEvent(payload);
```
* __payload__ [object]
- **payload** [object]
#### Example
To conditionally log the incoming event, the Lambda context and the environment variables:
```javascript
logger.logInputEvent({event, context, env: process.env})
logger.logInputEvent({ event, context, env: process.env });
```
### addContextKey
Add keys to the context object. Keys added to the context are available in all log outputs under the top level `context` key.
Useful to automatically add values to all future logs.
```javascript
logger.addContextKey(contextObject)
logger.addContextKey(contextObject);
```
* __contextObject__: [object]
- **contextObject**: [object]
### clearLogContext
Clears all context keys. This needs to be invoked at the end of each Lambda invocation to avoid re-using context keys across subsequent invocations.
```javascript
logger.clearLogContext()
logger.clearLogContext();
```
### log
Prints a log message.
```javascript
logger.log(level, message, payload, context, sensitiveAttributes)
logger.log(level, message, payload, context, sensitiveAttributes);
```
* __level__ [string, mandatory]: one of `info`, `debug`, `warn`, `error`
* __message__ [string, mandatory]: Assigned to the output `message`. It is good practice to keep it concise and describe the activity. Re-use the same message across multiple logs, identify the individual activities using context or payload values.
* __payload__ [string, object]: The payload to log
* __context__ [object]: Keys to add to the context of this log output
* __sensitiveAttributes__ [array of string]: Additional attributes to mask in this log output
- **level** [string, mandatory]: one of `info`, `debug`, `warn`, `error`
- **message** [string, mandatory]: Assigned to the output `message`. It is good practice to keep it concise and describe the activity. Re-use the same message across multiple logs, identify the individual activities using context or payload values.
- **payload** [string, object]: The payload to log
- **context** [object]: Keys to add to the context of this log output
- **sensitiveAttributes** [array of string]: Additional attributes to mask in this log output
#### Shorthand
* logger.info(message, payload, context, sensitiveAttributes)
* logger.debug(message, payload, context, sensitiveAttributes)
* logger.warn(message, payload, context, sensitiveAttributes)
* logger.error(message, payload, context, sensitiveAttributes)
- logger.info(message, payload, context, sensitiveAttributes)
- logger.debug(message, payload, context, sensitiveAttributes)
- logger.warn(message, payload, context, sensitiveAttributes)
- logger.error(message, payload, context, sensitiveAttributes)
#### Default masked attributes
Any key, be it in the `payload` or the `context`, having one of this values will be masked in the output:
* password
* userid
* token
* secret
* key
* x-api-key
* bearer
* authorization
- password
- userid
- token
- secret
- key
- x-api-key
- bearer
- authorization
Masking can be disabled, by setting the environment variable `LOG_MASK="false"`.
### Max log level
When `SG_LOGGER_LOG_LEVEL` is set, only log levels equal or greater than the specified value will be logged. Log levels are in the following order of importance.
`debug` < `info` < `warn` < `error`
**Note**: When used with AWS Lambda, and when `AWS_LAMBDA_LOG_LEVEL` is set to a stricter level than `SG_LOGGER_LOG_LEVEL`, ALC will drop logs emitted by the logger that don't match `AWS_LAMBDA_LOG_LEVEL`.
### metric
This generates a log output in [EMF](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format_Specification.html) format, creating a metric in [Cloudwatch Metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/working_with_metrics.html).
The metrics will be available under the namespace defined by `this.applicationName`.
```javascript
logger.metric(activity: string, meta: MetricMeta)
```
* __activity__ [string, mandatory]: The default dimension
* __meta__ [object]
* __name__ [string]: The name of the metric
* __value__ [number]: The value of the metric
* __unit__ [string]: The unit of the metric (see Logger.METRIC_UNITS)
* __dimensions__ [Array of String pairs]: Additional dimensions for the metric. `[[name1, value1], [name2, value2]]`
- **activity** [string, mandatory]: The default dimension
- **meta** [object]
- **name** [string]: The name of the metric
- **value** [number]: The value of the metric
- **unit** [string]: The unit of the metric (see Logger.METRIC_UNITS)
- **dimensions** [Array of String pairs]: Additional dimensions for the metric. `[[name1, value1], [name2, value2]]`
**Note**: To be able to use EMF, your log group needs to be set to _standard_ (default) and not _infrequent access_.
## CloudWatchLogs logFormat
Lambda allows you to use the CloudWatchLogs structured format (recommended), which not only stores the logs in JSON, but also allows setting the log level directly on the log group.
### Configure with [Serverless Framework](https://www.serverless.com)
#### Serverless v3
Serverless V3 doesn't allow setting the format directly from the function. You need to configure it via CloudFormation resources by extending the definition of the function generated by the framework.
The logical key for a function in CloudFormation is the logical key of the function with the suffix `LambdaFunction`.
```yaml
service: myService
provider:
name: aws
runtime: nodejs20.x
architecture: 'arm64'
name: aws
runtime: nodejs20.x
architecture: "arm64"
functions:
Multiply:
handler: src/handlers/multiply.handler
name: multiply
environment:
LOG_EVENT: 'true'
Multiply:
handler: src/handlers/multiply.handler
name: multiply
environment:
LOG_EVENT: "true"
resources:
resources:
MultiplyLambdaFunction:
Type: AWS::Lambda::Function
Properties:
LoggingConfig:
LogFormat: JSON
ApplicationLogLevel: WARN
SystemLogLevel: INFO
resources:
MultiplyLambdaFunction:
Type: AWS::Lambda::Function
Properties:
LoggingConfig:
LogFormat: JSON
ApplicationLogLevel: WARN
SystemLogLevel: INFO
```
#### Serverless v4
With Serverless v4, the logFormat can be directly defined in the framework definition, either globally under `provider` or per function.
```yaml
service: myService
provider:
name: aws
runtime: nodejs20.x
architecture: 'arm64'
logs:
lambda:
logFormat: JSON
applicationLogLevel: WARN
systemLogLevel: INFO
name: aws
runtime: nodejs20.x
architecture: "arm64"
logs:
lambda:
logFormat: JSON
applicationLogLevel: WARN
systemLogLevel: INFO
functions:
Multiply:
handler: src/handlers/multiply.handler
name: multiply
environment:
LOG_EVENT: 'true'
logs:
logFormat: JSON
applicationLogLevel: WARN
systemLogLevel: INFO
Multiply:
handler: src/handlers/multiply.handler
name: multiply
environment:
LOG_EVENT: "true"
logs:
logFormat: JSON
applicationLogLevel: WARN
systemLogLevel: INFO
```

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet