@aws-lambda-powertools/batch
Comparing version
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.SqsFifoPartialProcessor = exports.processPartialResponse = exports.processPartialResponseSync = exports.BatchProcessor = exports.BatchProcessorSync = exports.BasePartialBatchProcessor = exports.UnexpectedBatchTypeError = exports.SqsFifoShortCircuitError = exports.FullBatchFailureError = exports.BatchProcessingError = exports.EventType = void 0; | ||
exports.SqsFifoPartialProcessor = exports.processPartialResponse = exports.processPartialResponseSync = exports.BatchProcessor = exports.BatchProcessorSync = exports.BasePartialBatchProcessor = exports.UnexpectedBatchTypeError = exports.SqsFifoMessageGroupShortCircuitError = exports.SqsFifoShortCircuitError = exports.FullBatchFailureError = exports.BatchProcessingError = exports.EventType = void 0; | ||
var constants_js_1 = require("./constants.js"); | ||
@@ -10,2 +10,3 @@ Object.defineProperty(exports, "EventType", { enumerable: true, get: function () { return constants_js_1.EventType; } }); | ||
Object.defineProperty(exports, "SqsFifoShortCircuitError", { enumerable: true, get: function () { return errors_js_1.SqsFifoShortCircuitError; } }); | ||
Object.defineProperty(exports, "SqsFifoMessageGroupShortCircuitError", { enumerable: true, get: function () { return errors_js_1.SqsFifoMessageGroupShortCircuitError; } }); | ||
Object.defineProperty(exports, "UnexpectedBatchTypeError", { enumerable: true, get: function () { return errors_js_1.UnexpectedBatchTypeError; } }); | ||
@@ -12,0 +13,0 @@ var BasePartialBatchProcessor_js_1 = require("./BasePartialBatchProcessor.js"); |
@@ -6,5 +6,17 @@ import type { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; | ||
/** | ||
* Process batch and partially report failed items | ||
* Base abstract class for processing batch records with partial failure handling | ||
* | ||
* This class extends the {@link BasePartialProcessor} class and adds additional | ||
* functionality to handle batch processing. Specifically, it provides methods | ||
* to collect failed records and build the partial failure response. | ||
* | ||
* @abstract | ||
*/ | ||
declare abstract class BasePartialBatchProcessor extends BasePartialProcessor { | ||
/** | ||
* Mapping of event types to their respective failure collectors | ||
* | ||
* Each service expects a different format for partial failure reporting, | ||
* this mapping ensures that the correct format is used for each event type. | ||
*/ | ||
COLLECTOR_MAPPING: { | ||
@@ -15,51 +27,88 @@ SQS: () => PartialItemFailures[]; | ||
}; | ||
/** | ||
* Response to be returned after processing | ||
*/ | ||
batchResponse: PartialItemFailureResponse; | ||
/** | ||
* Type of event that the processor is handling | ||
*/ | ||
eventType: keyof typeof EventType; | ||
/** | ||
* Initializes base batch processing class | ||
* @param eventType Whether this is SQS, DynamoDB stream, or Kinesis data stream event | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
constructor(eventType: keyof typeof EventType); | ||
/** | ||
* Report messages to be deleted in case of partial failures | ||
* Clean up logic to be run after processing a batch | ||
* | ||
* If the entire batch failed, and the utility is not configured otherwise, | ||
* this method will throw a `FullBatchFailureError` with the list of errors | ||
* that occurred during processing. | ||
* | ||
* Otherwise, it will build the partial failure response based on the event type. | ||
*/ | ||
clean(): void; | ||
/** | ||
* Collects identifiers of failed items for a DynamoDB stream | ||
* @returns list of identifiers for failed items | ||
* Collect the identifiers of failed items for a DynamoDB stream | ||
* | ||
* The failures are collected based on the sequence number of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
collectDynamoDBFailures(): PartialItemFailures[]; | ||
/** | ||
* Collects identifiers of failed items for a Kinesis stream | ||
* @returns list of identifiers for failed items | ||
* Collect identifiers of failed items for a Kinesis batch | ||
* | ||
* The failures are collected based on the sequence number of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
collectKinesisFailures(): PartialItemFailures[]; | ||
/** | ||
* Collects identifiers of failed items for an SQS batch | ||
* @returns list of identifiers for failed items | ||
* Collect identifiers of failed items for an SQS batch | ||
* | ||
* The failures are collected based on the message ID of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
collectSqsFailures(): PartialItemFailures[]; | ||
/** | ||
* Determines whether all records in a batch failed to process | ||
* @returns true if all records resulted in exception results | ||
* Determine if the entire batch failed | ||
* | ||
* If the number of errors is equal to the number of records, then the | ||
* entire batch failed and this method will return `true`. | ||
*/ | ||
entireBatchFailed(): boolean; | ||
/** | ||
* Collects identifiers for failed batch items | ||
* @returns formatted messages to use in batch deletion | ||
* Collect identifiers for failed batch items | ||
* | ||
* The method will call the appropriate collector based on the event type | ||
* and return the list of failed items. | ||
*/ | ||
getMessagesToReport(): PartialItemFailures[]; | ||
/** | ||
* Determines if any records failed to process | ||
* @returns true if any records resulted in exception | ||
* Determine if there are any failed records to report | ||
* | ||
* If there are no failed records, then the batch was successful | ||
* and this method will return `false`. | ||
*/ | ||
hasMessagesToReport(): boolean; | ||
/** | ||
* Remove results from previous execution | ||
* Set up the processor with the initial state ready for processing | ||
*/ | ||
prepare(): void; | ||
/** | ||
* @returns Batch items that failed processing, if any | ||
* Get the response from the batch processing | ||
*/ | ||
response(): PartialItemFailureResponse; | ||
/** | ||
* Forward a record to the appropriate batch type | ||
* | ||
* Based on the event type that the processor was initialized with, this method | ||
* will cast the record to the appropriate batch type handler. | ||
* | ||
* @param record The record to be processed | ||
* @param eventType The type of event to process | ||
*/ | ||
toBatchType(record: EventSourceDataClassTypes, eventType: keyof typeof EventType): SQSRecord | KinesisStreamRecord | DynamoDBRecord; | ||
@@ -66,0 +115,0 @@ } |
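Taken together, the methods above form a small lifecycle: `register` binds a batch, `process` runs it, and `response` yields the partial failure payload. Below is a minimal sketch of driving a concrete subclass (`BatchProcessor`, shown later in this diff) by hand; the `drive` function and the failing-record logic are hypothetical. Note that `process()` will throw a `FullBatchFailureError` from `clean()` if every record fails, unless configured otherwise.

```typescript
import { BatchProcessor, EventType } from '@aws-lambda-powertools/batch';
import type { SQSRecord } from 'aws-lambda';

const processor = new BatchProcessor(EventType.SQS);

// Hypothetical handler that fails on empty bodies to force a partial failure.
const recordHandler = async (record: SQSRecord): Promise<void> => {
  if (!record.body) throw new Error('empty body');
};

export const drive = async (records: SQSRecord[]) => {
  // register() binds this batch and handler; process() runs
  // prepare() -> processRecord() per record -> clean().
  processor.register(records, recordHandler);
  await processor.process();

  // response() returns the payload reported back to the service, e.g.
  // { batchItemFailures: [{ itemIdentifier: '<messageId>' }] }.
  return processor.response();
};
```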
@@ -8,3 +8,9 @@ "use strict"; | ||
/** | ||
* Process batch and partially report failed items | ||
* Base abstract class for processing batch records with partial failure handling | ||
* | ||
* This class extends the {@link BasePartialProcessor} class and adds additional | ||
* functionality to handle batch processing. Specifically, it provides methods | ||
* to collect failed records and build the partial failure response. | ||
* | ||
* @abstract | ||
*/ | ||
@@ -14,3 +20,4 @@ class BasePartialBatchProcessor extends BasePartialProcessor_js_1.BasePartialProcessor { | ||
* Initializes base batch processing class | ||
* @param eventType Whether this is SQS, DynamoDB stream, or Kinesis data stream event | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
@@ -28,3 +35,9 @@ constructor(eventType) { | ||
/** | ||
* Report messages to be deleted in case of partial failures | ||
* Clean up logic to be run after processing a batch | ||
* | ||
* If the entire batch failed, and the utility is not configured otherwise, | ||
* this method will throw a `FullBatchFailureError` with the list of errors | ||
* that occurred during processing. | ||
* | ||
* Otherwise, it will build the partial failure response based on the event type. | ||
*/ | ||
@@ -42,4 +55,7 @@ clean() { | ||
/** | ||
* Collects identifiers of failed items for a DynamoDB stream | ||
* @returns list of identifiers for failed items | ||
* Collect the identifiers of failed items for a DynamoDB stream | ||
* | ||
* The failures are collected based on the sequence number of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
@@ -57,4 +73,7 @@ collectDynamoDBFailures() { | ||
/** | ||
* Collects identifiers of failed items for a Kinesis stream | ||
* @returns list of identifiers for failed items | ||
* Collect identifiers of failed items for a Kinesis batch | ||
* | ||
* The failures are collected based on the sequence number of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
@@ -70,4 +89,7 @@ collectKinesisFailures() { | ||
/** | ||
* Collects identifiers of failed items for an SQS batch | ||
* @returns list of identifiers for failed items | ||
* Collect identifiers of failed items for an SQS batch | ||
* | ||
* The failures are collected based on the message ID of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
@@ -83,4 +105,6 @@ collectSqsFailures() { | ||
/** | ||
* Determines whether all records in a batch failed to process | ||
* @returns true if all records resulted in exception results | ||
* Determine if the entire batch failed | ||
* | ||
* If the number of errors is equal to the number of records, then the | ||
* entire batch failed and this method will return `true`. | ||
*/ | ||
@@ -91,4 +115,6 @@ entireBatchFailed() { | ||
/** | ||
* Collects identifiers for failed batch items | ||
* @returns formatted messages to use in batch deletion | ||
* Collect identifiers for failed batch items | ||
* | ||
* The method will call the appropriate collector based on the event type | ||
* and return the list of failed items. | ||
*/ | ||
@@ -99,4 +125,6 @@ getMessagesToReport() { | ||
/** | ||
* Determines if any records failed to process | ||
* @returns true if any records resulted in exception | ||
* Determine if there are any failed records to report | ||
* | ||
* If there are no failed records, then the batch was successful | ||
* and this method will return `false`. | ||
*/ | ||
@@ -107,3 +135,3 @@ hasMessagesToReport() { | ||
/** | ||
* Remove results from previous execution | ||
* Set up the processor with the initial state ready for processing | ||
*/ | ||
@@ -117,3 +145,3 @@ prepare() { | ||
/** | ||
* @returns Batch items that failed processing, if any | ||
* Get the response from the batch processing | ||
*/ | ||
@@ -123,2 +151,11 @@ response() { | ||
} | ||
/** | ||
* Forward a record to the appropriate batch type | ||
* | ||
* Based on the event type that the processor was initialized with, this method | ||
* will cast the record to the appropriate batch type handler. | ||
* | ||
* @param record The record to be processed | ||
* @param eventType The type of event to process | ||
*/ | ||
toBatchType(record, eventType) { | ||
@@ -125,0 +162,0 @@ return constants_js_1.DATA_CLASS_MAPPING[eventType](record); |
@@ -1,34 +0,80 @@ | ||
import type { BaseRecord, BatchProcessingOptions, EventSourceDataClassTypes, FailureResponse, ResultType, SuccessResponse } from './types.js'; | ||
import type { BaseRecord, BatchProcessingOptions, EventSourceDataClassTypes, FailureResponse, SuccessResponse } from './types.js'; | ||
/** | ||
* Abstract class for batch processors. | ||
* | ||
* This class provides a common interface for processing records in a batch. | ||
* | ||
* Batch processors implementing this class should provide implementations for | ||
* a number of abstract methods that are specific to the type of processor or the | ||
* type of records being processed. | ||
* | ||
* The class comes with a few helper methods and hooks that can be used to prepare | ||
* the processor before processing records, clean up after processing records, and | ||
* handle records that succeed or fail processing. | ||
* | ||
* @abstract | ||
*/ | ||
declare abstract class BasePartialProcessor { | ||
/** | ||
* List of errors that occurred during processing | ||
*/ | ||
errors: Error[]; | ||
/** | ||
* List of records that failed processing | ||
*/ | ||
failureMessages: EventSourceDataClassTypes[]; | ||
/** | ||
* Record handler provided by customers to process records | ||
*/ | ||
handler: CallableFunction; | ||
/** | ||
* Options to be used during processing (optional) | ||
*/ | ||
options?: BatchProcessingOptions; | ||
/** | ||
* List of records to be processed | ||
*/ | ||
records: BaseRecord[]; | ||
successMessages: EventSourceDataClassTypes[]; | ||
/** | ||
* Initializes base processor class | ||
* List of records that were processed successfully | ||
*/ | ||
successMessages: EventSourceDataClassTypes[]; | ||
constructor(); | ||
/** | ||
* Clean class instance after processing | ||
* Cleans or resets the processor instance after completing a batch | ||
* | ||
* This method should be called after processing a full batch to reset the processor. | ||
* | ||
* You can use this as a hook to run any cleanup logic after processing the records. | ||
* | ||
* @abstract | ||
*/ | ||
abstract clean(): void; | ||
/** | ||
* Keeps track of batch records that failed processing | ||
* @param record record that failed processing | ||
* @param exception exception that was thrown | ||
* @returns FailureResponse object with ["fail", exception, original record] | ||
* Method to handle a record that failed processing | ||
* | ||
* This method should be called when a record fails processing so that | ||
* the processor can keep track of the error and the record that failed. | ||
* | ||
* @param record Record that failed processing | ||
* @param error Error that was thrown | ||
*/ | ||
failureHandler(record: EventSourceDataClassTypes, exception: Error): FailureResponse; | ||
failureHandler(record: EventSourceDataClassTypes, error: Error): FailureResponse; | ||
/** | ||
* Prepare class instance before processing | ||
* | ||
* This method should be called before processing the records | ||
* | ||
* You can use this as a hook to run any setup logic before processing the records. | ||
* | ||
* @abstract | ||
*/ | ||
abstract prepare(): void; | ||
/** | ||
* Call instance's handler for each record | ||
* @returns List of processed records | ||
* Process all records with an asynchronous handler | ||
* | ||
* Once called, the processor will create an array of promises to process each record | ||
* and wait for all of them to settle before returning the results. | ||
* | ||
* Before and after processing, the processor will call the prepare and clean methods respectively. | ||
*/ | ||
@@ -39,2 +85,13 @@ process(): Promise<(SuccessResponse | FailureResponse)[]>; | ||
* | ||
* An implementation of this method is required for asynchronous processors. | ||
* | ||
* When implementing this method, you should at least call the successHandler method | ||
* when a record succeeds processing and the failureHandler method when a record | ||
* fails processing. | ||
* | ||
* This is to ensure that the processor keeps track of the results and the records | ||
* that succeeded and failed processing. | ||
* | ||
* @abstract | ||
* | ||
* @param record Record to be processed | ||
@@ -44,3 +101,15 @@ */ | ||
/** | ||
* Process a record with the handler | ||
* Process a record with a synchronous handler | ||
* | ||
* An implementation of this method is required for synchronous processors. | ||
* | ||
* When implementing this method, you should at least call the successHandler method | ||
* when a record succeeds processing and the failureHandler method when a record | ||
* fails processing. | ||
* | ||
* This is to ensure that the processor keeps track of the results and the records | ||
* that succeeded and failed processing. | ||
* | ||
* @abstract | ||
* | ||
* @param record Record to be processed | ||
@@ -50,3 +119,11 @@ */ | ||
/** | ||
* Call instance's handler for each record | ||
* Orchestrate the processing of a batch of records synchronously | ||
* and sequentially. | ||
* | ||
* The method is responsible for calling the prepare method before | ||
* processing the records and the clean method after processing the records. | ||
* | ||
* In the middle, the method will iterate over the records and call the | ||
* processRecordSync method for each record. | ||
* | ||
* @returns List of processed records | ||
@@ -56,18 +133,29 @@ */ | ||
/** | ||
* Set class instance attributes before execution | ||
* @param records List of records to be processed | ||
* @param handler CallableFunction to process entries of "records" | ||
* @param options Options to be used during processing | ||
* @returns this object | ||
* Set up the processor with the records and the handler | ||
* | ||
* This method should be called before processing the records to | ||
* bind the records and the handler for a specific invocation to | ||
* the processor. | ||
* | ||
* We use a separate method to do this rather than the constructor | ||
* to allow for reusing the processor instance across multiple invocations | ||
* by instantiating the processor outside of the Lambda function handler. | ||
* | ||
* @param records Array of records to be processed | ||
* @param handler CallableFunction to process each record from the batch | ||
* @param options Options to be used during processing (optional) | ||
*/ | ||
register(records: BaseRecord[], handler: CallableFunction, options?: BatchProcessingOptions): BasePartialProcessor; | ||
register(records: BaseRecord[], handler: CallableFunction, options?: BatchProcessingOptions): this; | ||
/** | ||
* Keeps track of batch records that were processed successfully | ||
* @param record record that succeeded processing | ||
* @param result result from record handler | ||
* @returns SuccessResponse object with ["success", result, original record] | ||
* Method to handle a record that succeeded processing | ||
* | ||
* This method should be called when a record succeeds processing so that | ||
* the processor can keep track of the result and the record that succeeded. | ||
* | ||
* @param record Record that succeeded processing | ||
* @param result Result from record handler | ||
*/ | ||
successHandler(record: EventSourceDataClassTypes, result: ResultType): SuccessResponse; | ||
successHandler(record: EventSourceDataClassTypes, result: unknown): SuccessResponse; | ||
} | ||
export { BasePartialProcessor }; | ||
//# sourceMappingURL=BasePartialProcessor.d.ts.map |
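To make the abstract contract above concrete, here is a minimal sketch of a custom processor. The class name and per-record logic are hypothetical, the relative imports mirror the paths used in this file, and the casts paper over the `BaseRecord`/`EventSourceDataClassTypes` distinction for brevity:

```typescript
import { BasePartialProcessor } from './BasePartialProcessor.js';
import type {
  BaseRecord,
  EventSourceDataClassTypes,
  FailureResponse,
  SuccessResponse,
} from './types.js';

// Hypothetical processor: no event-source-specific behavior, just the hooks.
class MyMinimalProcessor extends BasePartialProcessor {
  public prepare(): void {
    // Hook run before a batch: reset state left over from a previous run.
    this.successMessages = [];
    this.failureMessages = [];
    this.errors = [];
  }

  public clean(): void {
    // Hook run after a batch: nothing to clean up in this sketch.
  }

  public async processRecord(
    record: BaseRecord
  ): Promise<SuccessResponse | FailureResponse> {
    try {
      // Route results through the success/failure handlers so the processor
      // keeps track of them, as the docstrings above require.
      const result = await this.handler(record);
      return this.successHandler(record as EventSourceDataClassTypes, result);
    } catch (error) {
      return this.failureHandler(
        record as EventSourceDataClassTypes,
        error as Error
      );
    }
  }

  public processRecordSync(record: BaseRecord): SuccessResponse | FailureResponse {
    try {
      return this.successHandler(
        record as EventSourceDataClassTypes,
        this.handler(record)
      );
    } catch (error) {
      return this.failureHandler(
        record as EventSourceDataClassTypes,
        error as Error
      );
    }
  }
}

export { MyMinimalProcessor };
```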
@@ -6,7 +6,16 @@ "use strict"; | ||
* Abstract class for batch processors. | ||
* | ||
* This class provides a common interface for processing records in a batch. | ||
* | ||
* Batch processors implementing this class should provide implementations for | ||
* a number of abstract methods that are specific to the type of processor or the | ||
* type of records being processed. | ||
* | ||
* The class comes with a few helper methods and hooks that can be used to prepare | ||
* the processor before processing records, clean up after processing records, and | ||
* handle records that succeed or fail processing. | ||
* | ||
* @abstract | ||
*/ | ||
class BasePartialProcessor { | ||
/** | ||
* Initializes base processor class | ||
*/ | ||
constructor() { | ||
@@ -17,13 +26,17 @@ this.successMessages = []; | ||
this.records = []; | ||
// No-op function to avoid null checks, will be overridden by customer when using the class | ||
this.handler = new Function(); | ||
} | ||
/** | ||
* Keeps track of batch records that failed processing | ||
* @param record record that failed processing | ||
* @param exception exception that was thrown | ||
* @returns FailureResponse object with ["fail", exception, original record] | ||
* Method to handle a record that failed processing | ||
* | ||
* This method should be called when a record fails processing so that | ||
* the processor can keep track of the error and the record that failed. | ||
* | ||
* @param record Record that failed processing | ||
* @param error Error that was thrown | ||
*/ | ||
failureHandler(record, exception) { | ||
const entry = ['fail', exception.message, record]; | ||
this.errors.push(exception); | ||
failureHandler(record, error) { | ||
const entry = ['fail', error.message, record]; | ||
this.errors.push(error); | ||
this.failureMessages.push(record); | ||
@@ -33,4 +46,8 @@ return entry; | ||
/** | ||
* Call instance's handler for each record | ||
* @returns List of processed records | ||
* Process all records with an asynchronous handler | ||
* | ||
* Once called, the processor will create an array of promises to process each record | ||
* and wait for all of them to settle before returning the results. | ||
* | ||
* Before and after processing, the processor will call the prepare and clean methods respectively. | ||
*/ | ||
@@ -52,3 +69,11 @@ async process() { | ||
/** | ||
* Call instance's handler for each record | ||
* Orchestrate the processing of a batch of records synchronously | ||
* and sequentially. | ||
* | ||
* The method is responsible for calling the prepare method before | ||
* processing the records and the clean method after processing the records. | ||
* | ||
* In the middle, the method will iterate over the records and call the | ||
* processRecordSync method for each record. | ||
* | ||
* @returns List of processed records | ||
@@ -73,7 +98,15 @@ */ | ||
/** | ||
* Set class instance attributes before execution | ||
* @param records List of records to be processed | ||
* @param handler CallableFunction to process entries of "records" | ||
* @param options Options to be used during processing | ||
* @returns this object | ||
* Set up the processor with the records and the handler | ||
* | ||
* This method should be called before processing the records to | ||
* bind the records and the handler for a specific invocation to | ||
* the processor. | ||
* | ||
* We use a separate method to do this rather than the constructor | ||
* to allow for reusing the processor instance across multiple invocations | ||
* by instantiating the processor outside of the Lambda function handler. | ||
* | ||
* @param records Array of records to be processed | ||
* @param handler CallableFunction to process each record from the batch | ||
* @param options Options to be used during processing (optional) | ||
*/ | ||
@@ -89,6 +122,9 @@ register(records, handler, options) { | ||
/** | ||
* Keeps track of batch records that were processed successfully | ||
* @param record record that succeeded processing | ||
* @param result result from record handler | ||
* @returns SuccessResponse object with ["success", result, original record] | ||
* Method to handle a record that succeeded processing | ||
* | ||
* This method should be called when a record succeeds processing so that | ||
* the processor can keep track of the result and the record that succeeded. | ||
* | ||
* @param record Record that succeeded processing | ||
* @param result Result from record handler | ||
*/ | ||
@@ -95,0 +131,0 @@ successHandler(record, result) { |
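The `register` docstring above motivates a specific usage pattern: create the processor once, outside the Lambda handler, and re-bind records per invocation. A sketch of that reuse, where the batches, the per-record work, and the `simulateTwoInvocations` wrapper are hypothetical:

```typescript
import { BatchProcessor, EventType } from '@aws-lambda-powertools/batch';
import type { SQSRecord } from 'aws-lambda';

// Created once per execution environment, so the instance is reused.
const processor = new BatchProcessor(EventType.SQS);

const recordHandler = async (record: SQSRecord): Promise<void> => {
  JSON.parse(record.body); // hypothetical per-record work
};

// Two simulated invocations sharing one processor: register() re-binds the
// records and handler each time, and prepare() clears the previous run's state.
export const simulateTwoInvocations = async (
  batchA: SQSRecord[],
  batchB: SQSRecord[]
) => {
  processor.register(batchA, recordHandler);
  await processor.process();

  processor.register(batchB, recordHandler);
  await processor.process();
};
```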
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import type { BaseRecord, FailureResponse, SuccessResponse } from './types.js'; | ||
/** | ||
* Process native partial responses from SQS, Kinesis Data Streams, and DynamoDB | ||
* Process records in a batch asynchronously and handle partial failure cases. | ||
* | ||
* The batch processor supports processing records coming from Amazon SQS, | ||
* Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
* | ||
* Items are processed asynchronously and in parallel. | ||
* | ||
* **Process batch triggered by SQS** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.SQS); | ||
* | ||
* const recordHandler = async (record: SQSRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by Kinesis Data Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = async (record: KinesisStreamRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.kinesis.data); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by DynamoDB Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { DynamoDBRecord, DynamoDBStreamHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.DynamoDBStreams); | ||
* | ||
* const recordHandler = async (record: DynamoDBRecord): Promise<void> => { | ||
* const payload = record.dynamodb.NewImage.Message.S; | ||
* }; | ||
* | ||
* export const handler: DynamoDBStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
declare class BatchProcessor extends BasePartialBatchProcessor { | ||
/** | ||
* Handle a record asynchronously with the instance handler provided. | ||
* | ||
* This method implements the abstract method from the parent class, | ||
* and orchestrates the processing of a single record. | ||
* | ||
* First, it converts the record to the appropriate type for the batch processor. | ||
* Then, it calls the handler function with the record data and context. | ||
* | ||
* If the handler function completes successfully, the method returns a success response. | ||
* Otherwise, it returns a failure response with the error that occurred during processing. | ||
* | ||
* @param record The record to be processed | ||
*/ | ||
processRecord(record: BaseRecord): Promise<SuccessResponse | FailureResponse>; | ||
/** | ||
* Process a record with instance's handler | ||
* @param _record Batch record to be processed | ||
* @returns response of success or failure | ||
* @throws {BatchProcessingError} This method is not implemented for synchronous processing. | ||
* | ||
* @param _record The record to be processed | ||
*/ | ||
@@ -13,0 +103,0 @@ processRecordSync(_record: BaseRecord): SuccessResponse | FailureResponse; |
@@ -7,5 +7,95 @@ "use strict"; | ||
/** | ||
* Process native partial responses from SQS, Kinesis Data Streams, and DynamoDB | ||
* Process records in a batch asynchronously and handle partial failure cases. | ||
* | ||
* The batch processor supports processing records coming from Amazon SQS, | ||
* Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
* | ||
* Items are processed asynchronously and in parallel. | ||
* | ||
* **Process batch triggered by SQS** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.SQS); | ||
* | ||
* const recordHandler = async (record: SQSRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by Kinesis Data Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = async (record: KinesisStreamRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.kinesis.data); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by DynamoDB Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { DynamoDBRecord, DynamoDBStreamHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.DynamoDBStreams); | ||
* | ||
* const recordHandler = async (record: DynamoDBRecord): Promise<void> => { | ||
* const payload = record.dynamodb.NewImage.Message.S; | ||
* }; | ||
* | ||
* export const handler: DynamoDBStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
class BatchProcessor extends BasePartialBatchProcessor_js_1.BasePartialBatchProcessor { | ||
/** | ||
* Handle a record asynchronously with the instance handler provided. | ||
* | ||
* This method implements the abstract method from the parent class, | ||
* and orchestrates the processing of a single record. | ||
* | ||
* First, it converts the record to the appropriate type for the batch processor. | ||
* Then, it calls the handler function with the record data and context. | ||
* | ||
* If the handler function completes successfully, the method returns a success response. | ||
* Otherwise, it returns a failure response with the error that occurred during processing. | ||
* | ||
* @param record The record to be processed | ||
*/ | ||
async processRecord(record) { | ||
@@ -22,5 +112,5 @@ try { | ||
/** | ||
* Process a record with instance's handler | ||
* @param _record Batch record to be processed | ||
* @returns response of success or failure | ||
* @throws {BatchProcessingError} This method is not implemented for synchronous processing. | ||
* | ||
* @param _record The record to be processed | ||
*/ | ||
@@ -27,0 +117,0 @@ processRecordSync(_record) { |
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import type { BaseRecord, FailureResponse, SuccessResponse } from './types.js'; | ||
/** | ||
* Process native partial responses from SQS, Kinesis Data Streams, and DynamoDB | ||
* Process records in a batch synchronously and handle partial failure cases. | ||
* | ||
* The batch processor supports processing records coming from Amazon SQS, | ||
* Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
* | ||
* Items are processed synchronously and in sequence. | ||
* | ||
* **Process batch triggered by SQS** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.SQS); | ||
* | ||
* const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by Kinesis Data Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = (record: KinesisStreamRecord): void => { | ||
* const payload = JSON.parse(record.kinesis.data); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by DynamoDB Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { DynamoDBRecord, DynamoDBStreamHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.DynamoDBStreams); | ||
* | ||
* const recordHandler = (record: DynamoDBRecord): void => { | ||
* const payload = record.dynamodb.NewImage.Message.S; | ||
* }; | ||
* | ||
* export const handler: DynamoDBStreamHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
declare class BatchProcessorSync extends BasePartialBatchProcessor { | ||
/** | ||
* @throws {BatchProcessingError} This method is not implemented for asynchronous processing. | ||
* | ||
* @param _record The record to be processed | ||
*/ | ||
processRecord(_record: BaseRecord): Promise<SuccessResponse | FailureResponse>; | ||
/** | ||
* Process a record with instance's handler | ||
* @param record Batch record to be processed | ||
* @returns response of success or failure | ||
* Handle a record synchronously with the instance handler provided. | ||
* | ||
* This method implements the abstract method from the parent class, | ||
* and orchestrates the processing of a single record. | ||
* | ||
* First, it converts the record to the appropriate type for the batch processor. | ||
* Then, it calls the handler function with the record data and context. | ||
* | ||
* If the handler function completes successfully, the method returns a success response. | ||
* Otherwise, it returns a failure response with the error that occurred during processing. | ||
* | ||
* @param record The record to be processed | ||
*/ | ||
@@ -13,0 +103,0 @@ processRecordSync(record: BaseRecord): SuccessResponse | FailureResponse; |
@@ -7,5 +7,86 @@ "use strict"; | ||
/** | ||
* Process native partial responses from SQS, Kinesis Data Streams, and DynamoDB | ||
* Process records in a batch synchronously and handle partial failure cases. | ||
* | ||
* The batch processor supports processing records coming from Amazon SQS, | ||
* Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
* | ||
* Items are processed synchronously and in sequence. | ||
* | ||
* **Process batch triggered by SQS** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.SQS); | ||
* | ||
* const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by Kinesis Data Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = (record: KinesisStreamRecord): void => { | ||
* const payload = JSON.parse(record.kinesis.data); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by DynamoDB Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { DynamoDBRecord, DynamoDBStreamHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.DynamoDBStreams); | ||
* | ||
* const recordHandler = (record: DynamoDBRecord): void => { | ||
* const payload = record.dynamodb.NewImage.Message.S; | ||
* }; | ||
* | ||
* export const handler: DynamoDBStreamHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
class BatchProcessorSync extends BasePartialBatchProcessor_js_1.BasePartialBatchProcessor { | ||
/** | ||
* @throws {BatchProcessingError} This method is not implemented for asynchronous processing. | ||
* | ||
* @param _record The record to be processed | ||
*/ | ||
async processRecord(_record) { | ||
@@ -15,5 +96,14 @@ throw new errors_js_1.BatchProcessingError('Not implemented. Use process() instead.'); | ||
/** | ||
* Process a record with instance's handler | ||
* @param record Batch record to be processed | ||
* @returns response of success or failure | ||
* Handle a record synchronously with the instance handler provided. | ||
* | ||
* This method implements the abstract method from the parent class, | ||
* and orchestrates the processing of a single record. | ||
* | ||
* First, it converts the record to the appropriate type for the batch processor. | ||
* Then, it calls the handler function with the record data and context. | ||
* | ||
* If the handler function completes successfully, the method returns a success response. | ||
* Otherwise, it returns a failure response with the error that occurred during processing. | ||
* | ||
* @param record The record to be processed | ||
*/ | ||
@@ -20,0 +110,0 @@ processRecordSync(record) { |
import type { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; | ||
import type { PartialItemFailureResponse, EventSourceDataClassTypes } from './types.js'; | ||
/** | ||
* Enum of supported event types for the utility | ||
*/ | ||
declare const EventType: { | ||
@@ -8,3 +11,9 @@ readonly SQS: "SQS"; | ||
}; | ||
/** | ||
* Default response for the partial batch processor | ||
*/ | ||
declare const DEFAULT_RESPONSE: PartialItemFailureResponse; | ||
/** | ||
* Mapping of event types to their respective data classes | ||
*/ | ||
declare const DATA_CLASS_MAPPING: { | ||
@@ -11,0 +20,0 @@ SQS: (record: EventSourceDataClassTypes) => SQSRecord; |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.DATA_CLASS_MAPPING = exports.DEFAULT_RESPONSE = exports.EventType = void 0; | ||
/** | ||
* Enum of supported event types for the utility | ||
*/ | ||
const EventType = { | ||
@@ -10,2 +13,5 @@ SQS: 'SQS', | ||
exports.EventType = EventType; | ||
/** | ||
* Default response for the partial batch processor | ||
*/ | ||
const DEFAULT_RESPONSE = { | ||
@@ -15,2 +21,5 @@ batchItemFailures: [], | ||
exports.DEFAULT_RESPONSE = DEFAULT_RESPONSE; | ||
/** | ||
* Mapping of event types to their respective data classes | ||
*/ | ||
const DATA_CLASS_MAPPING = { | ||
@@ -17,0 +26,0 @@ [EventType.SQS]: (record) => record, |
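Since `EventType` is a plain const object rather than a TypeScript enum, its values are string literals, and `keyof typeof EventType` (used throughout the processors above) resolves to those keys. A small sketch; the three key names are taken from the examples elsewhere in this diff:

```typescript
import { EventType } from '@aws-lambda-powertools/batch';

// 'SQS' | 'KinesisDataStreams' | 'DynamoDBStreams'
type SupportedEvent = keyof typeof EventType;

console.log(EventType.SQS); // 'SQS'

// Internally, DEFAULT_RESPONSE seeds each batch with no failures:
// { batchItemFailures: [] } — the shape Lambda expects for partial batch responses.
```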
@@ -23,2 +23,9 @@ /** | ||
/** | ||
* Error thrown by the Batch Processing utility when a previous record from the | ||
* same SQS FIFO message group failed processing. | ||
*/ | ||
declare class SqsFifoMessageGroupShortCircuitError extends BatchProcessingError { | ||
constructor(); | ||
} | ||
/** | ||
* Error thrown by the Batch Processing utility when a partial processor receives an unexpected | ||
@@ -30,3 +37,3 @@ * batch type. | ||
} | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, UnexpectedBatchTypeError, }; | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, SqsFifoMessageGroupShortCircuitError, UnexpectedBatchTypeError, }; | ||
//# sourceMappingURL=errors.d.ts.map |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.UnexpectedBatchTypeError = exports.SqsFifoShortCircuitError = exports.FullBatchFailureError = exports.BatchProcessingError = void 0; | ||
exports.UnexpectedBatchTypeError = exports.SqsFifoMessageGroupShortCircuitError = exports.SqsFifoShortCircuitError = exports.FullBatchFailureError = exports.BatchProcessingError = void 0; | ||
const constants_js_1 = require("./constants.js"); | ||
@@ -39,2 +39,13 @@ /** | ||
/** | ||
* Error thrown by the Batch Processing utility when a previous record from the | ||
* same SQS FIFO message group failed processing. | ||
*/ | ||
class SqsFifoMessageGroupShortCircuitError extends BatchProcessingError { | ||
constructor() { | ||
super('A previous record from this message group failed processing'); | ||
this.name = 'SqsFifoMessageGroupShortCircuitError'; | ||
} | ||
} | ||
exports.SqsFifoMessageGroupShortCircuitError = SqsFifoMessageGroupShortCircuitError; | ||
/** | ||
* Error thrown by the Batch Processing utility when a partial processor receives an unexpected | ||
@@ -41,0 +52,0 @@ * batch type. |
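A sketch of handling this error hierarchy at the handler boundary: `FullBatchFailureError` is what `clean()` throws when every record fails, while the new `SqsFifoMessageGroupShortCircuitError` surfaces per record during FIFO processing. The per-record work here is hypothetical:

```typescript
import {
  BatchProcessor,
  EventType,
  FullBatchFailureError,
  processPartialResponse,
} from '@aws-lambda-powertools/batch';
import type { SQSHandler, SQSRecord } from 'aws-lambda';

const processor = new BatchProcessor(EventType.SQS);

const recordHandler = async (record: SQSRecord): Promise<void> => {
  JSON.parse(record.body); // hypothetical work; throws on malformed JSON
};

export const handler: SQSHandler = async (event, context) => {
  try {
    return await processPartialResponse(event, recordHandler, processor, {
      context,
    });
  } catch (error) {
    if (error instanceof FullBatchFailureError) {
      // Every record in the batch failed; rethrowing lets Lambda retry or
      // dead-letter the whole batch.
      console.error('Entire batch failed', error);
    }
    throw error;
  }
};
```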
export { EventType } from './constants.js'; | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, UnexpectedBatchTypeError, } from './errors.js'; | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, SqsFifoMessageGroupShortCircuitError, UnexpectedBatchTypeError, } from './errors.js'; | ||
export { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
@@ -4,0 +4,0 @@ export { BatchProcessorSync } from './BatchProcessorSync.js'; |
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.SqsFifoPartialProcessor = exports.processPartialResponse = exports.processPartialResponseSync = exports.BatchProcessor = exports.BatchProcessorSync = exports.BasePartialBatchProcessor = exports.UnexpectedBatchTypeError = exports.SqsFifoShortCircuitError = exports.FullBatchFailureError = exports.BatchProcessingError = exports.EventType = void 0; | ||
exports.SqsFifoPartialProcessor = exports.processPartialResponse = exports.processPartialResponseSync = exports.BatchProcessor = exports.BatchProcessorSync = exports.BasePartialBatchProcessor = exports.UnexpectedBatchTypeError = exports.SqsFifoMessageGroupShortCircuitError = exports.SqsFifoShortCircuitError = exports.FullBatchFailureError = exports.BatchProcessingError = exports.EventType = void 0; | ||
var constants_js_1 = require("./constants.js"); | ||
@@ -10,2 +10,3 @@ Object.defineProperty(exports, "EventType", { enumerable: true, get: function () { return constants_js_1.EventType; } }); | ||
Object.defineProperty(exports, "SqsFifoShortCircuitError", { enumerable: true, get: function () { return errors_js_1.SqsFifoShortCircuitError; } }); | ||
Object.defineProperty(exports, "SqsFifoMessageGroupShortCircuitError", { enumerable: true, get: function () { return errors_js_1.SqsFifoMessageGroupShortCircuitError; } }); | ||
Object.defineProperty(exports, "UnexpectedBatchTypeError", { enumerable: true, get: function () { return errors_js_1.UnexpectedBatchTypeError; } }); | ||
@@ -12,0 +13,0 @@ var BasePartialBatchProcessor_js_1 = require("./BasePartialBatchProcessor.js"); |
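For reference, everything re-exported here is importable from the package root; a usage sketch of the public surface after this change:

```typescript
import {
  BasePartialBatchProcessor,
  BatchProcessingError,
  BatchProcessor,
  BatchProcessorSync,
  EventType,
  FullBatchFailureError,
  SqsFifoMessageGroupShortCircuitError,
  SqsFifoPartialProcessor,
  SqsFifoShortCircuitError,
  UnexpectedBatchTypeError,
  processPartialResponse,
  processPartialResponseSync,
} from '@aws-lambda-powertools/batch';
```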
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import type { BaseRecord, BatchProcessingOptions, PartialItemFailureResponse } from './types.js'; | ||
/** | ||
* Higher level function to handle batch event processing | ||
* @param event Lambda's original event | ||
* @param recordHandler Callable function to process each record from the batch | ||
* @param processor Batch processor to handle partial failure cases | ||
* Higher level function to process a batch of records asynchronously | ||
* and handle partial failure cases. | ||
* | ||
* This function is intended to be used within asynchronous Lambda handlers | ||
* and together with a batch processor that implements the {@link BasePartialBatchProcessor} | ||
* interface. | ||
* | ||
* It accepts a batch of records, a record handler function, a batch processor, | ||
* and an optional set of options to configure the batch processing. | ||
* | ||
* By default, the function will process the batch of records asynchronously | ||
* and in parallel. If you need to process the records synchronously, you can | ||
* use the {@link processPartialResponseSync} function instead. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = async (record: KinesisStreamRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.kinesis.data); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param event The event object containing the batch of records | ||
* @param recordHandler Async function to process each record from the batch | ||
* @param processor Batch processor instance to handle the batch processing | ||
* @param options Batch processing options | ||
* @returns Lambda Partial Batch Response | ||
*/ | ||
@@ -11,0 +44,0 @@ declare const processPartialResponse: (event: { |
@@ -6,8 +6,41 @@ "use strict"; | ||
/** | ||
* Higher level function to handle batch event processing | ||
* @param event Lambda's original event | ||
* @param recordHandler Callable function to process each record from the batch | ||
* @param processor Batch processor to handle partial failure cases | ||
* Higher level function to process a batch of records asynchronously | ||
* and handle partial failure cases. | ||
* | ||
* This function is intended to be used within asynchronous Lambda handlers | ||
* and together with a batch processor that implements the {@link BasePartialBatchProcessor} | ||
* interface. | ||
* | ||
* It accepts a batch of records, a record handler function, a batch processor, | ||
* and an optional set of options to configure the batch processing. | ||
* | ||
* By default, the function will process the batch of records asynchronously | ||
* and in parallel. If you need to process the records synchronously, you can | ||
* use the {@link processPartialResponseSync} function instead. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = async (record: KinesisStreamRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.kinesis.data); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param event The event object containing the batch of records | ||
* @param recordHandler Async function to process each record from the batch | ||
* @param processor Batch processor instance to handle the batch processing | ||
* @param options Batch processing options | ||
* @returns Lambda Partial Batch Response | ||
*/ | ||
@@ -14,0 +47,0 @@ const processPartialResponse = async (event, recordHandler, processor, options) => { |
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import type { BaseRecord, BatchProcessingOptions, PartialItemFailureResponse } from './types.js'; | ||
/** | ||
* Higher level function to handle batch event processing | ||
* @param event Lambda's original event | ||
* @param recordHandler Callable function to process each record from the batch | ||
* @param processor Batch processor to handle partial failure cases | ||
* @returns Lambda Partial Batch Response | ||
* Higher level function to process a batch of records synchronously | ||
* and handle partial failure cases. | ||
* | ||
* This function is intended to be used within synchronous Lambda handlers | ||
* and together with a batch processor that implements the {@link BasePartialBatchProcessor} | ||
* interface. | ||
* | ||
* It accepts a batch of records, a record handler function, a batch processor, | ||
* and an optional set of options to configure the batch processing. | ||
* | ||
* By default, the function will process the batch of records synchronously | ||
* and in sequence. If you need to process the records asynchronously, you can | ||
* use the {@link processPartialResponse} function instead. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.SQS); | ||
* | ||
* const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* When working with SQS FIFO queues, we will stop processing at the first failure | ||
* and mark unprocessed messages as failed to preserve ordering. However, if you want | ||
* to continue processing messages from other message group IDs after a failure, you | ||
* can enable the `skipGroupOnError` option. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* SqsFifoPartialProcessor, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new SqsFifoPartialProcessor(); | ||
* | ||
* const recordHandler = async (record: SQSRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* skipGroupOnError: true | ||
* }); | ||
* ``` | ||
* | ||
* @param event The event object containing the batch of records | ||
* @param recordHandler Sync function to process each record from the batch | ||
* @param processor Batch processor instance to handle the batch processing | ||
* @param options Batch processing options, which can vary with chosen batch processor implementation | ||
*/ | ||
declare const processPartialResponseSync: (event: { | ||
declare const processPartialResponseSync: <T extends BasePartialBatchProcessor>(event: { | ||
Records: BaseRecord[]; | ||
}, recordHandler: CallableFunction, processor: BasePartialBatchProcessor, options?: BatchProcessingOptions) => PartialItemFailureResponse; | ||
}, recordHandler: CallableFunction, processor: T, options?: BatchProcessingOptions<T>) => PartialItemFailureResponse; | ||
export { processPartialResponseSync }; | ||
//# sourceMappingURL=processPartialResponseSync.d.ts.map |
@@ -6,7 +6,67 @@ "use strict"; | ||
/** | ||
* Higher level function to handle batch event processing | ||
* @param event Lambda's original event | ||
* @param recordHandler Callable function to process each record from the batch | ||
* @param processor Batch processor to handle partial failure cases | ||
* @returns Lambda Partial Batch Response | ||
* Higher level function to process a batch of records synchronously | ||
* and handle partial failure cases. | ||
* | ||
* This function is intended to be used within synchronous Lambda handlers | ||
* and together with a batch processor that implements the {@link BasePartialBatchProcessor} | ||
* interface. | ||
* | ||
* It accepts a batch of records, a record handler function, a batch processor, | ||
* and an optional set of options to configure the batch processing. | ||
* | ||
* By default, the function will process the batch of records synchronously | ||
* and in sequence. If you need to process the records asynchronously, you can | ||
* use the {@link processPartialResponse} function instead. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.SQS); | ||
* | ||
* const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* When working with SQS FIFO queues, we will stop processing at the first failure | ||
* and mark unprocessed messages as failed to preserve ordering. However, if you want | ||
* to continue processing messages from other message group IDs after a failure, you | ||
* can enable the `skipGroupOnError` option. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* SqsFifoPartialProcessor, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new SqsFifoPartialProcessor(); | ||
* | ||
* const recordHandler = async (record: SQSRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* skipGroupOnError: true | ||
* }); | ||
* ``` | ||
* | ||
* @param event The event object containing the batch of records | ||
* @param recordHandler Sync function to process each record from the batch | ||
* @param processor Batch processor instance to handle the batch processing | ||
* @param options Batch processing options, which can vary with chosen batch processor implementation | ||
*/ | ||
@@ -13,0 +73,0 @@ const processPartialResponseSync = (event, recordHandler, processor, options) => { |
import { BatchProcessorSync } from './BatchProcessorSync.js'; | ||
import type { FailureResponse, SuccessResponse } from './types.js'; | ||
import type { EventSourceDataClassTypes, FailureResponse, SuccessResponse } from './types.js'; | ||
/** | ||
* Process native partial responses from SQS FIFO queues | ||
* Stops processing records when the first record fails | ||
* The remaining records are reported as failed items | ||
* Batch processor for SQS FIFO queues | ||
* | ||
* This class extends the {@link BatchProcessorSync} class and provides | ||
* a mechanism to process records from SQS FIFO queues synchronously. | ||
* | ||
* By default, we will stop processing at the first failure and mark unprocessed messages as failed to preserve ordering. | ||
* | ||
* However, this behavior may not be optimal for customers who wish to proceed with processing messages from a different group ID. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
 * SqsFifoPartialProcessor, | ||
 * processPartialResponseSync, | ||
 * } from '@aws-lambda-powertools/batch'; | ||
 * import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
 * | ||
 * const processor = new SqsFifoPartialProcessor(); | ||
 * | ||
 * const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
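 * | ||
 * To continue processing messages from other message groups after a failure, | ||
 * enable the `skipGroupOnError` option (a minimal sketch; `recordHandler` and | ||
 * `processor` are the same as in the example above): | ||
 * | ||
 * @example | ||
 * ```typescript | ||
 * export const handler: SQSHandler = async (event, context) => | ||
 *   processPartialResponseSync(event, recordHandler, processor, { | ||
 *     context, | ||
 *     skipGroupOnError: true, | ||
 *   }); | ||
 * ``` | ||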
*/ | ||
declare class SqsFifoPartialProcessor extends BatchProcessorSync { | ||
#private; | ||
constructor(); | ||
/** | ||
* Call instance's handler for each record. | ||
* When the first failed message is detected, the process is short-circuited | ||
* And the remaining messages are reported as failed items | ||
* Handles a failure for a given record. | ||
* Adds the current group ID to the set of failed group IDs if `skipGroupOnError` is true. | ||
* @param record - The record that failed. | ||
* @param exception - The error that occurred. | ||
* @returns The failure response. | ||
*/ | ||
failureHandler(record: EventSourceDataClassTypes, exception: Error): FailureResponse; | ||
/** | ||
* Process a record with a synchronous handler | ||
* | ||
* This method orchestrates the processing of a batch of records synchronously | ||
* for SQS FIFO queues. | ||
* | ||
* The method calls the prepare hook to initialize the processor and then | ||
* iterates over each record in the batch, processing them one by one. | ||
* | ||
 * If one of them fails and `skipGroupOnError` is not true, the method short-circuits | ||
 * the processing and fails the remaining records in the batch. | ||
 * | ||
 * If one of them fails and `skipGroupOnError` is true, the method fails the current record | ||
 * only if its message group has a previous failure; otherwise it keeps processing. | ||
* | ||
* Then, it calls the clean hook to clean up the processor and returns the | ||
* processed records. | ||
*/ | ||
processSync(): (SuccessResponse | FailureResponse)[]; | ||
/** | ||
* Starting from the first failure index, fail all remaining messages and append them to the result list | ||
* Starting from the first failure index, fail all remaining messages regardless | ||
* of their group ID. | ||
* | ||
 * This short-circuit mechanism is used when we detect a failed message in the batch. | ||
* | ||
* Since messages in a FIFO queue are processed in order, we must stop processing any | ||
* remaining messages in the batch to prevent out-of-order processing. | ||
* | ||
* @param firstFailureIndex Index of first message that failed | ||
* @param result List of success and failure responses with remaining messages failed | ||
* @param processedRecords Array of response items that have been processed both successfully and unsuccessfully | ||
*/ | ||
shortCircuitProcessing(firstFailureIndex: number, processedRecords: (SuccessResponse | FailureResponse)[]): (SuccessResponse | FailureResponse)[]; | ||
protected shortCircuitProcessing(firstFailureIndex: number, processedRecords: (SuccessResponse | FailureResponse)[]): (SuccessResponse | FailureResponse)[]; | ||
} | ||
export { SqsFifoPartialProcessor }; | ||
//# sourceMappingURL=SqsFifoPartialProcessor.d.ts.map |
"use strict"; | ||
var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) { | ||
if (kind === "m") throw new TypeError("Private method is not writable"); | ||
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter"); | ||
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it"); | ||
return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value; | ||
}; | ||
var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) { | ||
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter"); | ||
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it"); | ||
return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver); | ||
}; | ||
var _SqsFifoPartialProcessor_instances, _SqsFifoPartialProcessor_currentGroupId, _SqsFifoPartialProcessor_failedGroupIds, _SqsFifoPartialProcessor_addToFailedGroup, _SqsFifoPartialProcessor_processFailRecord, _SqsFifoPartialProcessor_setCurrentGroup; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
@@ -8,5 +20,31 @@ exports.SqsFifoPartialProcessor = void 0; | ||
/** | ||
* Process native partial responses from SQS FIFO queues | ||
* Stops processing records when the first record fails | ||
* The remaining records are reported as failed items | ||
* Batch processor for SQS FIFO queues | ||
* | ||
* This class extends the {@link BatchProcessorSync} class and provides | ||
* a mechanism to process records from SQS FIFO queues synchronously. | ||
* | ||
* By default, we will stop processing at the first failure and mark unprocessed messages as failed to preserve ordering. | ||
* | ||
* However, this behavior may not be optimal for customers who wish to proceed with processing messages from a different group ID. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.SQS); | ||
* | ||
* const recordHandler = async (record: SQSRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
*/ | ||
@@ -16,8 +54,44 @@ class SqsFifoPartialProcessor extends BatchProcessorSync_js_1.BatchProcessorSync { | ||
super(constants_js_1.EventType.SQS); | ||
_SqsFifoPartialProcessor_instances.add(this); | ||
/** | ||
* The ID of the current message group being processed. | ||
*/ | ||
_SqsFifoPartialProcessor_currentGroupId.set(this, void 0); | ||
/** | ||
* A set of group IDs that have already encountered failures. | ||
*/ | ||
_SqsFifoPartialProcessor_failedGroupIds.set(this, void 0); | ||
__classPrivateFieldSet(this, _SqsFifoPartialProcessor_failedGroupIds, new Set(), "f"); | ||
} | ||
/** | ||
* Call instance's handler for each record. | ||
* When the first failed message is detected, the process is short-circuited | ||
* And the remaining messages are reported as failed items | ||
* Handles a failure for a given record. | ||
* Adds the current group ID to the set of failed group IDs if `skipGroupOnError` is true. | ||
* @param record - The record that failed. | ||
* @param exception - The error that occurred. | ||
* @returns The failure response. | ||
*/ | ||
failureHandler(record, exception) { | ||
if (this.options?.skipGroupOnError && __classPrivateFieldGet(this, _SqsFifoPartialProcessor_currentGroupId, "f")) { | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_instances, "m", _SqsFifoPartialProcessor_addToFailedGroup).call(this, __classPrivateFieldGet(this, _SqsFifoPartialProcessor_currentGroupId, "f")); | ||
} | ||
return super.failureHandler(record, exception); | ||
} | ||
/** | ||
* Process a record with a synchronous handler | ||
* | ||
* This method orchestrates the processing of a batch of records synchronously | ||
* for SQS FIFO queues. | ||
* | ||
* The method calls the prepare hook to initialize the processor and then | ||
* iterates over each record in the batch, processing them one by one. | ||
* | ||
 * If one of them fails and `skipGroupOnError` is not true, the method short-circuits | ||
 * the processing and fails the remaining records in the batch. | ||
 * | ||
 * If one of them fails and `skipGroupOnError` is true, the method fails the current record | ||
 * only if its message group has a previous failure; otherwise it keeps processing. | ||
* | ||
* Then, it calls the clean hook to clean up the processor and returns the | ||
* processed records. | ||
*/ | ||
processSync() { | ||
@@ -28,8 +102,18 @@ this.prepare(); | ||
for (const record of this.records) { | ||
// If we have any failed messages, it means the last message failed | ||
// We should then short circuit the process and fail remaining messages | ||
if (this.failureMessages.length != 0) { | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_instances, "m", _SqsFifoPartialProcessor_setCurrentGroup).call(this, record.attributes?.MessageGroupId); | ||
// If we have any failed messages, we should short-circuit the process and | ||
// fail remaining messages unless `skipGroupOnError` is true | ||
const shouldShortCircuit = !this.options?.skipGroupOnError && this.failureMessages.length !== 0; | ||
if (shouldShortCircuit) { | ||
return this.shortCircuitProcessing(currentIndex, processedRecords); | ||
} | ||
processedRecords.push(this.processRecordSync(record)); | ||
// If `skipGroupOnError` is true and the current group has previously failed, | ||
// then we should skip processing the current group. | ||
const shouldSkipCurrentGroup = this.options?.skipGroupOnError && | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_currentGroupId, "f") && | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_failedGroupIds, "f").has(__classPrivateFieldGet(this, _SqsFifoPartialProcessor_currentGroupId, "f")); | ||
const result = shouldSkipCurrentGroup | ||
? __classPrivateFieldGet(this, _SqsFifoPartialProcessor_instances, "m", _SqsFifoPartialProcessor_processFailRecord).call(this, record, new errors_js_1.SqsFifoMessageGroupShortCircuitError()) | ||
: this.processRecordSync(record); | ||
processedRecords.push(result); | ||
currentIndex++; | ||
@@ -41,5 +125,12 @@ } | ||
/** | ||
* Starting from the first failure index, fail all remaining messages and append them to the result list | ||
* Starting from the first failure index, fail all remaining messages regardless | ||
* of their group ID. | ||
* | ||
 * This short-circuit mechanism is used when we detect a failed message in the batch. | ||
* | ||
* Since messages in a FIFO queue are processed in order, we must stop processing any | ||
* remaining messages in the batch to prevent out-of-order processing. | ||
* | ||
* @param firstFailureIndex Index of first message that failed | ||
* @param result List of success and failure responses with remaining messages failed | ||
* @param processedRecords Array of response items that have been processed both successfully and unsuccessfully | ||
*/ | ||
@@ -49,4 +140,3 @@ shortCircuitProcessing(firstFailureIndex, processedRecords) { | ||
for (const record of remainingRecords) { | ||
const data = this.toBatchType(record, this.eventType); | ||
processedRecords.push(this.failureHandler(data, new errors_js_1.SqsFifoShortCircuitError())); | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_instances, "m", _SqsFifoPartialProcessor_processFailRecord).call(this, record, new errors_js_1.SqsFifoShortCircuitError()); | ||
} | ||
@@ -58,1 +148,9 @@ this.clean(); | ||
exports.SqsFifoPartialProcessor = SqsFifoPartialProcessor; | ||
_SqsFifoPartialProcessor_currentGroupId = new WeakMap(), _SqsFifoPartialProcessor_failedGroupIds = new WeakMap(), _SqsFifoPartialProcessor_instances = new WeakSet(), _SqsFifoPartialProcessor_addToFailedGroup = function _SqsFifoPartialProcessor_addToFailedGroup(group) { | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_failedGroupIds, "f").add(group); | ||
}, _SqsFifoPartialProcessor_processFailRecord = function _SqsFifoPartialProcessor_processFailRecord(record, exception) { | ||
const data = this.toBatchType(record, this.eventType); | ||
return this.failureHandler(data, exception); | ||
}, _SqsFifoPartialProcessor_setCurrentGroup = function _SqsFifoPartialProcessor_setCurrentGroup(group) { | ||
__classPrivateFieldSet(this, _SqsFifoPartialProcessor_currentGroupId, group, "f"); | ||
}; |
import type { Context, DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; | ||
type BatchProcessingOptions = { | ||
context: Context; | ||
import { SqsFifoPartialProcessor } from './SqsFifoPartialProcessor.js'; | ||
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
/** | ||
* Options for batch processing | ||
* | ||
 * @template T The type of the batch processor, which defaults to `BasePartialBatchProcessor` | ||
 * @property context The context object provided by the AWS Lambda runtime | ||
 * @property skipGroupOnError Whether to skip processing the remaining messages in a message group after one of them fails (SQS FIFO only) | ||
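 * | ||
 * A minimal sketch of how the generic constrains `skipGroupOnError`; the | ||
 * variable names below are illustrative: | ||
 * | ||
 * @example | ||
 * ```typescript | ||
 * // OK: the FIFO processor allows the flag | ||
 * const fifoOptions: BatchProcessingOptions<SqsFifoPartialProcessor> = { | ||
 *   skipGroupOnError: true, | ||
 * }; | ||
 * // Type error: the flag is typed as `never` for any other processor | ||
 * // const options: BatchProcessingOptions<BatchProcessor> = { skipGroupOnError: true }; | ||
 * ``` | ||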
*/ | ||
type BatchProcessingOptions<T = BasePartialBatchProcessor> = { | ||
/** | ||
* The context object provided by the AWS Lambda runtime. When provided, | ||
* it's made available to the handler function you specify | ||
*/ | ||
context?: Context; | ||
/** | ||
 * This option is only available for the SqsFifoPartialProcessor. | ||
 * If true, skip processing the remaining messages from a message group after one of them fails. | ||
*/ | ||
skipGroupOnError?: T extends SqsFifoPartialProcessor ? boolean : never; | ||
}; | ||
/** | ||
* The types of data that can be provided by an event source | ||
*/ | ||
type EventSourceDataClassTypes = SQSRecord | KinesisStreamRecord | DynamoDBRecord; | ||
type RecordValue = unknown; | ||
/** | ||
* Type representing a record from an event source | ||
*/ | ||
type BaseRecord = { | ||
[key: string]: RecordValue; | ||
[key: string]: unknown; | ||
} | EventSourceDataClassTypes; | ||
type ResultType = unknown; | ||
type SuccessResponse = ['success', ResultType, EventSourceDataClassTypes]; | ||
/** | ||
* Type representing a successful response | ||
* | ||
* The first element is the string literal 'success', | ||
* the second element is the result of the handler function, | ||
* and the third element is the type of data provided by the event source | ||
*/ | ||
type SuccessResponse = ['success', unknown, EventSourceDataClassTypes]; | ||
/** | ||
* Type representing a failure response | ||
* | ||
* The first element is the string literal 'fail', | ||
* the second element is the error message, | ||
* and the third element is the type of data provided by the event source | ||
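 * | ||
 * @example | ||
 * ```typescript | ||
 * // Sketch of both tuple shapes; `record` stands in for the original | ||
 * // event source record (e.g. an SQSRecord) and is illustrative | ||
 * const ok: SuccessResponse = ['success', { orderId: 42 }, record]; | ||
 * const ko: FailureResponse = ['fail', 'Invalid payload', record]; | ||
 * ``` | ||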
*/ | ||
type FailureResponse = ['fail', string, EventSourceDataClassTypes]; | ||
/** | ||
 * Type representing the identifier of a single item that failed processing | ||
*/ | ||
type PartialItemFailures = { | ||
itemIdentifier: string; | ||
}; | ||
/** | ||
 * Type representing the partial failure response returned to the event source | ||
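 * | ||
 * @example | ||
 * ```typescript | ||
 * // Sketch of the object returned to Lambda; the identifier value is | ||
 * // hypothetical (a message ID for SQS, a sequence number for streams) | ||
 * const response: PartialItemFailureResponse = { | ||
 *   batchItemFailures: [{ itemIdentifier: '059f36b4-87a3-44ab-83d2-661975830a7d' }], | ||
 * }; | ||
 * ``` | ||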
*/ | ||
type PartialItemFailureResponse = { | ||
batchItemFailures: PartialItemFailures[]; | ||
}; | ||
export type { BatchProcessingOptions, BaseRecord, EventSourceDataClassTypes, ResultType, SuccessResponse, FailureResponse, PartialItemFailures, PartialItemFailureResponse, }; | ||
export type { BatchProcessingOptions, BaseRecord, EventSourceDataClassTypes, SuccessResponse, FailureResponse, PartialItemFailures, PartialItemFailureResponse, }; | ||
//# sourceMappingURL=types.d.ts.map |
@@ -6,5 +6,17 @@ import type { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; | ||
/** | ||
* Process batch and partially report failed items | ||
* Base abstract class for processing batch records with partial failure handling | ||
* | ||
* This class extends the {@link BasePartialProcessor} class and adds additional | ||
* functionality to handle batch processing. Specifically, it provides methods | ||
* to collect failed records and build the partial failure response. | ||
* | ||
* @abstract | ||
*/ | ||
declare abstract class BasePartialBatchProcessor extends BasePartialProcessor { | ||
/** | ||
* Mapping of event types to their respective failure collectors | ||
* | ||
 * Each service expects a different format for partial failure reporting; | ||
 * this mapping ensures that the correct format is used for each event type. | ||
*/ | ||
COLLECTOR_MAPPING: { | ||
@@ -15,51 +27,88 @@ SQS: () => PartialItemFailures[]; | ||
}; | ||
/** | ||
* Response to be returned after processing | ||
*/ | ||
batchResponse: PartialItemFailureResponse; | ||
/** | ||
* Type of event that the processor is handling | ||
*/ | ||
eventType: keyof typeof EventType; | ||
/** | ||
* Initializes base batch processing class | ||
* @param eventType Whether this is SQS, DynamoDB stream, or Kinesis data stream event | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
constructor(eventType: keyof typeof EventType); | ||
/** | ||
* Report messages to be deleted in case of partial failures | ||
* Clean up logic to be run after processing a batch | ||
* | ||
* If the entire batch failed, and the utility is not configured otherwise, | ||
* this method will throw a `FullBatchFailureError` with the list of errors | ||
* that occurred during processing. | ||
* | ||
* Otherwise, it will build the partial failure response based on the event type. | ||
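 * | ||
 * @example | ||
 * ```typescript | ||
 * // Sketch: surfacing a full batch failure at the handler level; assumes | ||
 * // `event`, `recordHandler`, and `processor` are defined as usual | ||
 * try { | ||
 *   return await processPartialResponse(event, recordHandler, processor, { context }); | ||
 * } catch (error) { | ||
 *   if (error instanceof FullBatchFailureError) { | ||
 *     // error.recordErrors holds the individual record errors | ||
 *   } | ||
 *   throw error; | ||
 * } | ||
 * ``` | ||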
*/ | ||
clean(): void; | ||
/** | ||
* Collects identifiers of failed items for a DynamoDB stream | ||
* @returns list of identifiers for failed items | ||
* Collect the identifiers of failed items for a DynamoDB stream | ||
* | ||
* The failures are collected based on the sequence number of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
collectDynamoDBFailures(): PartialItemFailures[]; | ||
/** | ||
* Collects identifiers of failed items for a Kinesis stream | ||
* @returns list of identifiers for failed items | ||
* Collect identifiers of failed items for a Kinesis batch | ||
* | ||
* The failures are collected based on the sequence number of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
collectKinesisFailures(): PartialItemFailures[]; | ||
/** | ||
* Collects identifiers of failed items for an SQS batch | ||
* @returns list of identifiers for failed items | ||
* Collect identifiers of failed items for an SQS batch | ||
* | ||
* The failures are collected based on the message ID of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
collectSqsFailures(): PartialItemFailures[]; | ||
/** | ||
* Determines whether all records in a batch failed to process | ||
* @returns true if all records resulted in exception results | ||
* Determine if the entire batch failed | ||
* | ||
* If the number of errors is equal to the number of records, then the | ||
* entire batch failed and this method will return `true`. | ||
*/ | ||
entireBatchFailed(): boolean; | ||
/** | ||
* Collects identifiers for failed batch items | ||
* @returns formatted messages to use in batch deletion | ||
* Collect identifiers for failed batch items | ||
* | ||
* The method will call the appropriate collector based on the event type | ||
* and return the list of failed items. | ||
*/ | ||
getMessagesToReport(): PartialItemFailures[]; | ||
/** | ||
* Determines if any records failed to process | ||
* @returns true if any records resulted in exception | ||
* Determine if there are any failed records to report | ||
* | ||
* If there are no failed records, then the batch was successful | ||
* and this method will return `false`. | ||
*/ | ||
hasMessagesToReport(): boolean; | ||
/** | ||
* Remove results from previous execution | ||
* Set up the processor with the initial state ready for processing | ||
*/ | ||
prepare(): void; | ||
/** | ||
* @returns Batch items that failed processing, if any | ||
* Get the response from the batch processing | ||
*/ | ||
response(): PartialItemFailureResponse; | ||
/** | ||
* Forward a record to the appropriate batch type | ||
* | ||
* Based on the event type that the processor was initialized with, this method | ||
* will cast the record to the appropriate batch type handler. | ||
* | ||
* @param record The record to be processed | ||
* @param eventType The type of event to process | ||
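 * | ||
 * @example | ||
 * ```typescript | ||
 * // Sketch, assuming `record` comes from an SQS event and `processor` | ||
 * // is any concrete batch processor instance | ||
 * const sqsRecord = processor.toBatchType(record, EventType.SQS); | ||
 * // sqsRecord is now typed as an SQSRecord | ||
 * ``` | ||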
*/ | ||
toBatchType(record: EventSourceDataClassTypes, eventType: keyof typeof EventType): SQSRecord | KinesisStreamRecord | DynamoDBRecord; | ||
@@ -66,0 +115,0 @@ } |
@@ -5,3 +5,9 @@ import { BasePartialProcessor } from './BasePartialProcessor.js'; | ||
/** | ||
* Process batch and partially report failed items | ||
* Base abstract class for processing batch records with partial failure handling | ||
* | ||
* This class extends the {@link BasePartialProcessor} class and adds additional | ||
* functionality to handle batch processing. Specifically, it provides methods | ||
* to collect failed records and build the partial failure response. | ||
* | ||
* @abstract | ||
*/ | ||
@@ -11,3 +17,4 @@ class BasePartialBatchProcessor extends BasePartialProcessor { | ||
* Initializes base batch processing class | ||
* @param eventType Whether this is SQS, DynamoDB stream, or Kinesis data stream event | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
@@ -25,3 +32,9 @@ constructor(eventType) { | ||
/** | ||
* Report messages to be deleted in case of partial failures | ||
* Clean up logic to be run after processing a batch | ||
* | ||
* If the entire batch failed, and the utility is not configured otherwise, | ||
* this method will throw a `FullBatchFailureError` with the list of errors | ||
* that occurred during processing. | ||
* | ||
* Otherwise, it will build the partial failure response based on the event type. | ||
*/ | ||
@@ -39,4 +52,7 @@ clean() { | ||
/** | ||
* Collects identifiers of failed items for a DynamoDB stream | ||
* @returns list of identifiers for failed items | ||
* Collect the identifiers of failed items for a DynamoDB stream | ||
* | ||
* The failures are collected based on the sequence number of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
@@ -54,4 +70,7 @@ collectDynamoDBFailures() { | ||
/** | ||
* Collects identifiers of failed items for a Kinesis stream | ||
* @returns list of identifiers for failed items | ||
* Collect identifiers of failed items for a Kinesis batch | ||
* | ||
* The failures are collected based on the sequence number of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
@@ -67,4 +86,7 @@ collectKinesisFailures() { | ||
/** | ||
* Collects identifiers of failed items for an SQS batch | ||
* @returns list of identifiers for failed items | ||
* Collect identifiers of failed items for an SQS batch | ||
* | ||
* The failures are collected based on the message ID of the record | ||
* and formatted as a list of objects with the key `itemIdentifier` as | ||
* expected by the service. | ||
*/ | ||
@@ -80,4 +102,6 @@ collectSqsFailures() { | ||
/** | ||
* Determines whether all records in a batch failed to process | ||
* @returns true if all records resulted in exception results | ||
* Determine if the entire batch failed | ||
* | ||
* If the number of errors is equal to the number of records, then the | ||
* entire batch failed and this method will return `true`. | ||
*/ | ||
@@ -88,4 +112,6 @@ entireBatchFailed() { | ||
/** | ||
* Collects identifiers for failed batch items | ||
* @returns formatted messages to use in batch deletion | ||
* Collect identifiers for failed batch items | ||
* | ||
* The method will call the appropriate collector based on the event type | ||
* and return the list of failed items. | ||
*/ | ||
@@ -96,4 +122,6 @@ getMessagesToReport() { | ||
/** | ||
* Determines if any records failed to process | ||
* @returns true if any records resulted in exception | ||
* Determine if there are any failed records to report | ||
* | ||
* If there are no failed records, then the batch was successful | ||
* and this method will return `false`. | ||
*/ | ||
@@ -104,3 +132,3 @@ hasMessagesToReport() { | ||
/** | ||
* Remove results from previous execution | ||
* Set up the processor with the initial state ready for processing | ||
*/ | ||
@@ -114,3 +142,3 @@ prepare() { | ||
/** | ||
* @returns Batch items that failed processing, if any | ||
* Get the response from the batch processing | ||
*/ | ||
@@ -120,2 +148,11 @@ response() { | ||
} | ||
/** | ||
* Forward a record to the appropriate batch type | ||
* | ||
* Based on the event type that the processor was initialized with, this method | ||
* will cast the record to the appropriate batch type handler. | ||
* | ||
* @param record The record to be processed | ||
* @param eventType The type of event to process | ||
*/ | ||
toBatchType(record, eventType) { | ||
@@ -122,0 +159,0 @@ return DATA_CLASS_MAPPING[eventType](record); |
@@ -1,34 +0,80 @@ | ||
import type { BaseRecord, BatchProcessingOptions, EventSourceDataClassTypes, FailureResponse, ResultType, SuccessResponse } from './types.js'; | ||
import type { BaseRecord, BatchProcessingOptions, EventSourceDataClassTypes, FailureResponse, SuccessResponse } from './types.js'; | ||
/** | ||
* Abstract class for batch processors. | ||
* | ||
* This class provides a common interface for processing records in a batch. | ||
* | ||
* Batch processors implementing this class should provide implementations for | ||
* a number of abstract methods that are specific to the type of processor or the | ||
* type of records being processed. | ||
* | ||
* The class comes with a few helper methods and hooks that can be used to prepare | ||
* the processor before processing records, clean up after processing records, and | ||
* handle records that succeed or fail processing. | ||
* | ||
* @abstract | ||
*/ | ||
declare abstract class BasePartialProcessor { | ||
/** | ||
* List of errors that occurred during processing | ||
*/ | ||
errors: Error[]; | ||
/** | ||
* List of records that failed processing | ||
*/ | ||
failureMessages: EventSourceDataClassTypes[]; | ||
/** | ||
* Record handler provided by customers to process records | ||
*/ | ||
handler: CallableFunction; | ||
/** | ||
* Options to be used during processing (optional) | ||
*/ | ||
options?: BatchProcessingOptions; | ||
/** | ||
* List of records to be processed | ||
*/ | ||
records: BaseRecord[]; | ||
successMessages: EventSourceDataClassTypes[]; | ||
/** | ||
* Initializes base processor class | ||
* List of records that were processed successfully | ||
*/ | ||
successMessages: EventSourceDataClassTypes[]; | ||
constructor(); | ||
/** | ||
* Clean class instance after processing | ||
 * Cleans up or resets the processor instance after completing a batch | ||
* | ||
* This method should be called after processing a full batch to reset the processor. | ||
* | ||
* You can use this as a hook to run any cleanup logic after processing the records. | ||
* | ||
* @abstract | ||
*/ | ||
abstract clean(): void; | ||
/** | ||
* Keeps track of batch records that failed processing | ||
* @param record record that failed processing | ||
* @param exception exception that was thrown | ||
* @returns FailureResponse object with ["fail", exception, original record] | ||
* Method to handle a record that failed processing | ||
* | ||
* This method should be called when a record fails processing so that | ||
* the processor can keep track of the error and the record that failed. | ||
* | ||
* @param record Record that failed processing | ||
* @param error Error that was thrown | ||
*/ | ||
failureHandler(record: EventSourceDataClassTypes, exception: Error): FailureResponse; | ||
failureHandler(record: EventSourceDataClassTypes, error: Error): FailureResponse; | ||
/** | ||
* Prepare class instance before processing | ||
* | ||
* This method should be called before processing the records | ||
* | ||
* You can use this as a hook to run any setup logic before processing the records. | ||
* | ||
* @abstract | ||
*/ | ||
abstract prepare(): void; | ||
/** | ||
* Call instance's handler for each record | ||
* @returns List of processed records | ||
 * Process all records with an asynchronous handler | ||
* | ||
* Once called, the processor will create an array of promises to process each record | ||
* and wait for all of them to settle before returning the results. | ||
* | ||
* Before and after processing, the processor will call the prepare and clean methods respectively. | ||
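 * | ||
 * @example | ||
 * ```typescript | ||
 * // Conceptual sketch, assuming `processor` was registered with records | ||
 * // and a handler beforehand via `register()` | ||
 * const results = await processor.process(); | ||
 * // results is an array of ['success', ...] and ['fail', ...] tuples | ||
 * ``` | ||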
*/ | ||
@@ -39,2 +85,13 @@ process(): Promise<(SuccessResponse | FailureResponse)[]>; | ||
* | ||
 * An implementation of this method is required for asynchronous processors. | ||
* | ||
* When implementing this method, you should at least call the successHandler method | ||
* when a record succeeds processing and the failureHandler method when a record | ||
* fails processing. | ||
* | ||
* This is to ensure that the processor keeps track of the results and the records | ||
* that succeeded and failed processing. | ||
* | ||
* @abstract | ||
* | ||
* @param record Record to be processed | ||
@@ -44,3 +101,15 @@ */ | ||
/** | ||
* Process a record with the handler | ||
* Process a record with a synchronous handler | ||
* | ||
* An implementation of this method is required for synchronous processors. | ||
* | ||
* When implementing this method, you should at least call the successHandler method | ||
* when a record succeeds processing and the failureHandler method when a record | ||
* fails processing. | ||
* | ||
* This is to ensure that the processor keeps track of the results and the records | ||
* that succeeded and failed processing. | ||
* | ||
* @abstract | ||
* | ||
* @param record Record to be processed | ||
@@ -50,3 +119,11 @@ */ | ||
/** | ||
* Call instance's handler for each record | ||
* Orchestrate the processing of a batch of records synchronously | ||
* and sequentially. | ||
* | ||
* The method is responsible for calling the prepare method before | ||
* processing the records and the clean method after processing the records. | ||
* | ||
 * In between, it iterates over the records and calls the | ||
 * processRecordSync method for each record. | ||
* | ||
* @returns List of processed records | ||
@@ -56,18 +133,29 @@ */ | ||
/** | ||
* Set class instance attributes before execution | ||
* @param records List of records to be processed | ||
* @param handler CallableFunction to process entries of "records" | ||
* @param options Options to be used during processing | ||
* @returns this object | ||
* Set up the processor with the records and the handler | ||
* | ||
* This method should be called before processing the records to | ||
* bind the records and the handler for a specific invocation to | ||
* the processor. | ||
* | ||
* We use a separate method to do this rather than the constructor | ||
* to allow for reusing the processor instance across multiple invocations | ||
* by instantiating the processor outside of the Lambda function handler. | ||
* | ||
* @param records Array of records to be processed | ||
* @param handler CallableFunction to process each record from the batch | ||
* @param options Options to be used during processing (optional) | ||
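 * | ||
 * @example | ||
 * ```typescript | ||
 * // Minimal sketch: instantiate once, register per invocation; the names | ||
 * // are illustrative and `BatchProcessor` is one of the concrete | ||
 * // implementations shipped with this package | ||
 * const processor = new BatchProcessor(EventType.SQS); | ||
 * | ||
 * export const handler: SQSHandler = async (event, context) => { | ||
 *   processor.register(event.Records, recordHandler, { context }); | ||
 *   await processor.process(); | ||
 *   return processor.response(); | ||
 * }; | ||
 * ``` | ||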
*/ | ||
register(records: BaseRecord[], handler: CallableFunction, options?: BatchProcessingOptions): BasePartialProcessor; | ||
register(records: BaseRecord[], handler: CallableFunction, options?: BatchProcessingOptions): this; | ||
/** | ||
* Keeps track of batch records that were processed successfully | ||
* @param record record that succeeded processing | ||
* @param result result from record handler | ||
* @returns SuccessResponse object with ["success", result, original record] | ||
* Method to handle a record that succeeded processing | ||
* | ||
* This method should be called when a record succeeds processing so that | ||
* the processor can keep track of the result and the record that succeeded. | ||
* | ||
* @param record Record that succeeded processing | ||
* @param result Result from record handler | ||
*/ | ||
successHandler(record: EventSourceDataClassTypes, result: ResultType): SuccessResponse; | ||
successHandler(record: EventSourceDataClassTypes, result: unknown): SuccessResponse; | ||
} | ||
export { BasePartialProcessor }; | ||
//# sourceMappingURL=BasePartialProcessor.d.ts.map |
/** | ||
* Abstract class for batch processors. | ||
* | ||
* This class provides a common interface for processing records in a batch. | ||
* | ||
* Batch processors implementing this class should provide implementations for | ||
* a number of abstract methods that are specific to the type of processor or the | ||
* type of records being processed. | ||
* | ||
* The class comes with a few helper methods and hooks that can be used to prepare | ||
* the processor before processing records, clean up after processing records, and | ||
* handle records that succeed or fail processing. | ||
* | ||
* @abstract | ||
*/ | ||
class BasePartialProcessor { | ||
/** | ||
* Initializes base processor class | ||
*/ | ||
constructor() { | ||
@@ -13,13 +22,17 @@ this.successMessages = []; | ||
this.records = []; | ||
// No-op function to avoid null checks; it will be overridden by the customer when using the class | ||
this.handler = new Function(); | ||
} | ||
/** | ||
* Keeps track of batch records that failed processing | ||
* @param record record that failed processing | ||
* @param exception exception that was thrown | ||
* @returns FailureResponse object with ["fail", exception, original record] | ||
* Method to handle a record that failed processing | ||
* | ||
* This method should be called when a record fails processing so that | ||
* the processor can keep track of the error and the record that failed. | ||
* | ||
* @param record Record that failed processing | ||
* @param error Error that was thrown | ||
*/ | ||
failureHandler(record, exception) { | ||
const entry = ['fail', exception.message, record]; | ||
this.errors.push(exception); | ||
failureHandler(record, error) { | ||
const entry = ['fail', error.message, record]; | ||
this.errors.push(error); | ||
this.failureMessages.push(record); | ||
@@ -29,4 +42,8 @@ return entry; | ||
/** | ||
* Call instance's handler for each record | ||
* @returns List of processed records | ||
 * Process all records with an asynchronous handler | ||
* | ||
* Once called, the processor will create an array of promises to process each record | ||
* and wait for all of them to settle before returning the results. | ||
* | ||
* Before and after processing, the processor will call the prepare and clean methods respectively. | ||
*/ | ||
@@ -48,3 +65,11 @@ async process() { | ||
/** | ||
* Call instance's handler for each record | ||
* Orchestrate the processing of a batch of records synchronously | ||
* and sequentially. | ||
* | ||
* The method is responsible for calling the prepare method before | ||
* processing the records and the clean method after processing the records. | ||
* | ||
 * In between, it iterates over the records and calls the | ||
 * processRecordSync method for each record. | ||
* | ||
* @returns List of processed records | ||
@@ -69,7 +94,15 @@ */ | ||
/** | ||
* Set class instance attributes before execution | ||
* @param records List of records to be processed | ||
* @param handler CallableFunction to process entries of "records" | ||
* @param options Options to be used during processing | ||
* @returns this object | ||
* Set up the processor with the records and the handler | ||
* | ||
* This method should be called before processing the records to | ||
* bind the records and the handler for a specific invocation to | ||
* the processor. | ||
* | ||
* We use a separate method to do this rather than the constructor | ||
* to allow for reusing the processor instance across multiple invocations | ||
* by instantiating the processor outside of the Lambda function handler. | ||
* | ||
* @param records Array of records to be processed | ||
* @param handler CallableFunction to process each record from the batch | ||
* @param options Options to be used during processing (optional) | ||
*/ | ||
@@ -85,6 +118,9 @@ register(records, handler, options) { | ||
/** | ||
* Keeps track of batch records that were processed successfully | ||
* @param record record that succeeded processing | ||
* @param result result from record handler | ||
* @returns SuccessResponse object with ["success", result, original record] | ||
* Method to handle a record that succeeded processing | ||
* | ||
* This method should be called when a record succeeds processing so that | ||
* the processor can keep track of the result and the record that succeeded. | ||
* | ||
* @param record Record that succeeded processing | ||
* @param result Result from record handler | ||
*/ | ||
@@ -91,0 +127,0 @@ successHandler(record, result) { |
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import type { BaseRecord, FailureResponse, SuccessResponse } from './types.js'; | ||
/** | ||
* Process native partial responses from SQS, Kinesis Data Streams, and DynamoDB | ||
* Process records in a batch asynchronously and handle partial failure cases. | ||
* | ||
* The batch processor supports processing records coming from Amazon SQS, | ||
* Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
* | ||
* Items are processed asynchronously and in parallel. | ||
* | ||
* **Process batch triggered by SQS** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.SQS); | ||
* | ||
* const recordHandler = async (record: SQSRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
 * **Process batch triggered by Kinesis Data Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = async (record: KinesisStreamRecord): Promise<void> => { | ||
 * const payload = JSON.parse(Buffer.from(record.kinesis.data, 'base64').toString()); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by DynamoDB Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { DynamoDBRecord, DynamoDBStreamHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.DynamoDBStreams); | ||
* | ||
* const recordHandler = async (record: DynamoDBRecord): Promise<void> => { | ||
* const payload = record.dynamodb.NewImage.Message.S; | ||
* }; | ||
* | ||
* export const handler: DynamoDBStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
declare class BatchProcessor extends BasePartialBatchProcessor { | ||
/** | ||
* Handle a record asynchronously with the instance handler provided. | ||
* | ||
* This method implements the abstract method from the parent class, | ||
* and orchestrates the processing of a single record. | ||
* | ||
* First, it converts the record to the appropriate type for the batch processor. | ||
* Then, it calls the handler function with the record data and context. | ||
* | ||
* If the handler function completes successfully, the method returns a success response. | ||
* Otherwise, it returns a failure response with the error that occurred during processing. | ||
* | ||
* @param record The record to be processed | ||
*/ | ||
processRecord(record: BaseRecord): Promise<SuccessResponse | FailureResponse>; | ||
/** | ||
* Process a record with instance's handler | ||
* @param _record Batch record to be processed | ||
* @returns response of success or failure | ||
* @throws {BatchProcessingError} This method is not implemented for synchronous processing. | ||
* | ||
* @param _record The record to be processed | ||
*/ | ||
@@ -13,0 +103,0 @@ processRecordSync(_record: BaseRecord): SuccessResponse | FailureResponse; |
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import { BatchProcessingError } from './errors.js'; | ||
/** | ||
* Process native partial responses from SQS, Kinesis Data Streams, and DynamoDB | ||
* Process records in a batch asynchronously and handle partial failure cases. | ||
* | ||
* The batch processor supports processing records coming from Amazon SQS, | ||
* Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
* | ||
* Items are processed asynchronously and in parallel. | ||
* | ||
* **Process batch triggered by SQS** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.SQS); | ||
* | ||
* const recordHandler = async (record: SQSRecord): Promise<void> => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
 * **Process batch triggered by Kinesis Data Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = async (record: KinesisStreamRecord): Promise<void> => { | ||
 * const payload = JSON.parse(Buffer.from(record.kinesis.data, 'base64').toString()); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by DynamoDB Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { DynamoDBRecord, DynamoDBStreamHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.DynamoDBStreams); | ||
* | ||
* const recordHandler = async (record: DynamoDBRecord): Promise<void> => { | ||
* const payload = record.dynamodb.NewImage.Message.S; | ||
* }; | ||
* | ||
* export const handler: DynamoDBStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
class BatchProcessor extends BasePartialBatchProcessor { | ||
/** | ||
* Handle a record asynchronously with the instance handler provided. | ||
* | ||
* This method implements the abstract method from the parent class, | ||
* and orchestrates the processing of a single record. | ||
* | ||
* First, it converts the record to the appropriate type for the batch processor. | ||
* Then, it calls the handler function with the record data and context. | ||
* | ||
* If the handler function completes successfully, the method returns a success response. | ||
* Otherwise, it returns a failure response with the error that occurred during processing. | ||
* | ||
* @param record The record to be processed | ||
*/ | ||
async processRecord(record) { | ||
@@ -18,5 +108,5 @@ try { | ||
/** | ||
* Process a record with instance's handler | ||
* @param _record Batch record to be processed | ||
* @returns response of success or failure | ||
* @throws {BatchProcessingError} This method is not implemented for synchronous processing. | ||
* | ||
* @param _record The record to be processed | ||
*/ | ||
@@ -23,0 +113,0 @@ processRecordSync(_record) { |
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import type { BaseRecord, FailureResponse, SuccessResponse } from './types.js'; | ||
/** | ||
* Process native partial responses from SQS, Kinesis Data Streams, and DynamoDB | ||
* Process records in a batch synchronously and handle partial failure cases. | ||
* | ||
* The batch processor supports processing records coming from Amazon SQS, | ||
* Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
* | ||
* Items are processed synchronously and in sequence. | ||
* | ||
* **Process batch triggered by SQS** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.SQS); | ||
* | ||
* const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
 * **Process batch triggered by Kinesis Data Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = (record: KinesisStreamRecord): void => { | ||
 * const payload = JSON.parse(Buffer.from(record.kinesis.data, 'base64').toString()); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by DynamoDB Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { DynamoDBRecord, DynamoDBStreamHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.DynamoDBStreams); | ||
* | ||
* const recordHandler = (record: DynamoDBRecord): void => { | ||
* const payload = record.dynamodb.NewImage.Message.S; | ||
* }; | ||
* | ||
* export const handler: DynamoDBStreamHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
declare class BatchProcessorSync extends BasePartialBatchProcessor { | ||
/** | ||
* @throws {BatchProcessingError} This method is not implemented for asynchronous processing. | ||
* | ||
* @param _record The record to be processed | ||
*/ | ||
processRecord(_record: BaseRecord): Promise<SuccessResponse | FailureResponse>; | ||
/** | ||
* Process a record with instance's handler | ||
* @param record Batch record to be processed | ||
* @returns response of success or failure | ||
* Handle a record synchronously with the instance handler provided. | ||
* | ||
* This method implements the abstract method from the parent class, | ||
* and orchestrates the processing of a single record. | ||
* | ||
* First, it converts the record to the appropriate type for the batch processor. | ||
* Then, it calls the handler function with the record data and context. | ||
* | ||
* If the handler function completes successfully, the method returns a success response. | ||
* Otherwise, it returns a failure response with the error that occurred during processing. | ||
* | ||
* @param record The record to be processed | ||
*/ | ||
@@ -13,0 +103,0 @@ processRecordSync(record: BaseRecord): SuccessResponse | FailureResponse; |
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import { BatchProcessingError } from './errors.js'; | ||
/** | ||
* Process native partial responses from SQS, Kinesis Data Streams, and DynamoDB | ||
* Process records in a batch synchronously and handle partial failure cases. | ||
* | ||
* The batch processor supports processing records coming from Amazon SQS, | ||
* Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
* | ||
* Items are processed synchronously and in sequence. | ||
* | ||
* **Process batch triggered by SQS** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.SQS); | ||
* | ||
* const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
 * **Process batch triggered by Kinesis Data Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = (record: KinesisStreamRecord): void => { | ||
 * const payload = JSON.parse(Buffer.from(record.kinesis.data, 'base64').toString()); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* **Process batch triggered by DynamoDB Streams** | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessorSync, | ||
* EventType, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { DynamoDBRecord, DynamoDBStreamHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessorSync(EventType.DynamoDBStreams); | ||
* | ||
* const recordHandler = (record: DynamoDBRecord): void => { | ||
* const payload = record.dynamodb.NewImage.Message.S; | ||
* }; | ||
* | ||
* export const handler: DynamoDBStreamHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param eventType The type of event to process (SQS, Kinesis, DynamoDB) | ||
*/ | ||
class BatchProcessorSync extends BasePartialBatchProcessor { | ||
/** | ||
* @throws {BatchProcessingError} This method is not implemented for asynchronous processing. | ||
* | ||
* @param _record The record to be processed | ||
*/ | ||
async processRecord(_record) { | ||
@@ -11,5 +92,14 @@ throw new BatchProcessingError('Not implemented. Use process() instead.'); | ||
/** | ||
* Process a record with instance's handler | ||
* @param record Batch record to be processed | ||
* @returns response of success or failure | ||
* Handle a record synchronously with the instance handler provided. | ||
* | ||
* This method implements the abstract method from the parent class, | ||
* and orchestrates the processing of a single record. | ||
* | ||
* First, it converts the record to the appropriate type for the batch processor. | ||
* Then, it calls the handler function with the record data and context. | ||
* | ||
* If the handler function completes successfully, the method returns a success response. | ||
* Otherwise, it returns a failure response with the error that occurred during processing. | ||
* | ||
* @param record The record to be processed | ||
*/ | ||
@@ -16,0 +106,0 @@ processRecordSync(record) { |
import type { DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; | ||
import type { PartialItemFailureResponse, EventSourceDataClassTypes } from './types.js'; | ||
/** | ||
* Enum of supported event types for the utility | ||
*/ | ||
declare const EventType: { | ||
@@ -8,3 +11,9 @@ readonly SQS: "SQS"; | ||
}; | ||
/** | ||
* Default response for the partial batch processor | ||
*/ | ||
declare const DEFAULT_RESPONSE: PartialItemFailureResponse; | ||
/** | ||
* Mapping of event types to their respective data classes | ||
*/ | ||
declare const DATA_CLASS_MAPPING: { | ||
@@ -11,0 +20,0 @@ SQS: (record: EventSourceDataClassTypes) => SQSRecord; |
@@ -0,1 +1,4 @@ | ||
/** | ||
* Enum of supported event types for the utility | ||
*/ | ||
const EventType = { | ||
@@ -6,5 +9,11 @@ SQS: 'SQS', | ||
}; | ||
/** | ||
* Default response for the partial batch processor | ||
*/ | ||
const DEFAULT_RESPONSE = { | ||
batchItemFailures: [], | ||
}; | ||
/** | ||
* Mapping of event types to their respective data classes | ||
*/ | ||
const DATA_CLASS_MAPPING = { | ||
@@ -11,0 +20,0 @@ [EventType.SQS]: (record) => record, |
@@ -23,2 +23,9 @@ /** | ||
/** | ||
 * Error thrown by the Batch Processing utility when a record is not processed | ||
 * because a previous record from the same SQS FIFO message group failed processing. | ||
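 * | ||
 * @example | ||
 * ```typescript | ||
 * // Sketch: after processing with `skipGroupOnError`, records that were | ||
 * // skipped because an earlier record in their message group failed carry | ||
 * // this error; `processor` is an SqsFifoPartialProcessor instance | ||
 * for (const error of processor.errors) { | ||
 *   if (error instanceof SqsFifoMessageGroupShortCircuitError) { | ||
 *     // the corresponding record was skipped, not processed | ||
 *   } | ||
 * } | ||
 * ``` | ||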
*/ | ||
declare class SqsFifoMessageGroupShortCircuitError extends BatchProcessingError { | ||
constructor(); | ||
} | ||
/** | ||
* Error thrown by the Batch Processing utility when a partial processor receives an unexpected | ||
@@ -30,3 +37,3 @@ * batch type. | ||
} | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, UnexpectedBatchTypeError, }; | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, SqsFifoMessageGroupShortCircuitError, UnexpectedBatchTypeError, }; | ||
//# sourceMappingURL=errors.d.ts.map |
@@ -33,2 +33,12 @@ import { EventType } from './constants.js'; | ||
/** | ||
 * Error thrown by the Batch Processing utility when a record is not processed | ||
 * because a previous record from the same SQS FIFO message group failed processing. | ||
*/ | ||
class SqsFifoMessageGroupShortCircuitError extends BatchProcessingError { | ||
constructor() { | ||
super('A previous record from this message group failed processing'); | ||
this.name = 'SqsFifoMessageGroupShortCircuitError'; | ||
} | ||
} | ||
/** | ||
* Error thrown by the Batch Processing utility when a partial processor receives an unexpected | ||
@@ -43,2 +53,2 @@ * batch type. | ||
} | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, UnexpectedBatchTypeError, }; | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, SqsFifoMessageGroupShortCircuitError, UnexpectedBatchTypeError, }; |
export { EventType } from './constants.js'; | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, UnexpectedBatchTypeError, } from './errors.js'; | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, SqsFifoMessageGroupShortCircuitError, UnexpectedBatchTypeError, } from './errors.js'; | ||
export { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
@@ -4,0 +4,0 @@ export { BatchProcessorSync } from './BatchProcessorSync.js'; |
export { EventType } from './constants.js'; | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, UnexpectedBatchTypeError, } from './errors.js'; | ||
export { BatchProcessingError, FullBatchFailureError, SqsFifoShortCircuitError, SqsFifoMessageGroupShortCircuitError, UnexpectedBatchTypeError, } from './errors.js'; | ||
export { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
@@ -4,0 +4,0 @@ export { BatchProcessorSync } from './BatchProcessorSync.js'; |
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import type { BaseRecord, BatchProcessingOptions, PartialItemFailureResponse } from './types.js'; | ||
/** | ||
* Higher level function to handle batch event processing | ||
* @param event Lambda's original event | ||
* @param recordHandler Callable function to process each record from the batch | ||
* @param processor Batch processor to handle partial failure cases | ||
* Higher level function to process a batch of records asynchronously | ||
* and handle partial failure cases. | ||
* | ||
* This function is intended to be used within asynchronous Lambda handlers | ||
* and together with a batch processor that implements the {@link BasePartialBatchProcessor} | ||
* interface. | ||
* | ||
* It accepts a batch of records, a record handler function, a batch processor, | ||
* and an optional set of options to configure the batch processing. | ||
* | ||
* By default, the function will process the batch of records asynchronously | ||
* and in parallel. If you need to process the records synchronously, you can | ||
* use the {@link processPartialResponseSync} function instead. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = async (record: KinesisStreamRecord): Promise<void> => { | ||
 *   const payload = JSON.parse(Buffer.from(record.kinesis.data, 'base64').toString()); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param event The event object containing the batch of records | ||
* @param recordHandler Async function to process each record from the batch | ||
* @param processor Batch processor instance to handle the batch processing | ||
* @param options Batch processing options | ||
* @returns Lambda Partial Batch Response | ||
*/ | ||
@@ -11,0 +44,0 @@ declare const processPartialResponse: (event: { |
import { UnexpectedBatchTypeError } from './errors.js'; | ||
/** | ||
* Higher level function to handle batch event processing | ||
* @param event Lambda's original event | ||
* @param recordHandler Callable function to process each record from the batch | ||
* @param processor Batch processor to handle partial failure cases | ||
* Higher level function to process a batch of records asynchronously | ||
* and handle partial failure cases. | ||
* | ||
* This function is intended to be used within asynchronous Lambda handlers | ||
* and together with a batch processor that implements the {@link BasePartialBatchProcessor} | ||
* interface. | ||
* | ||
* It accepts a batch of records, a record handler function, a batch processor, | ||
* and an optional set of options to configure the batch processing. | ||
* | ||
* By default, the function will process the batch of records asynchronously | ||
* and in parallel. If you need to process the records synchronously, you can | ||
* use the {@link processPartialResponseSync} function instead. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* BatchProcessor, | ||
* EventType, | ||
* processPartialResponse, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
* | ||
* const processor = new BatchProcessor(EventType.KinesisDataStreams); | ||
* | ||
* const recordHandler = async (record: KinesisStreamRecord): Promise<void> => { | ||
 *   const payload = JSON.parse(Buffer.from(record.kinesis.data, 'base64').toString()); | ||
* }; | ||
* | ||
* export const handler: KinesisStreamHandler = async (event, context) => | ||
* processPartialResponse(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
* @param event The event object containing the batch of records | ||
* @param recordHandler Async function to process each record from the batch | ||
* @param processor Batch processor instance to handle the batch processing | ||
* @param options Batch processing options | ||
* @returns Lambda Partial Batch Response | ||
*/ | ||
@@ -10,0 +43,0 @@ const processPartialResponse = async (event, recordHandler, processor, options) => { |
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
import type { BaseRecord, BatchProcessingOptions, PartialItemFailureResponse } from './types.js'; | ||
/** | ||
* Higher level function to handle batch event processing | ||
* @param event Lambda's original event | ||
* @param recordHandler Callable function to process each record from the batch | ||
* @param processor Batch processor to handle partial failure cases | ||
* @returns Lambda Partial Batch Response | ||
* Higher level function to process a batch of records synchronously | ||
* and handle partial failure cases. | ||
* | ||
* This function is intended to be used within synchronous Lambda handlers | ||
* and together with a batch processor that implements the {@link BasePartialBatchProcessor} | ||
* interface. | ||
* | ||
* It accepts a batch of records, a record handler function, a batch processor, | ||
* and an optional set of options to configure the batch processing. | ||
* | ||
* By default, the function will process the batch of records synchronously | ||
* and in sequence. If you need to process the records asynchronously, you can | ||
* use the {@link processPartialResponse} function instead. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
 *   BatchProcessorSync, | ||
 *   EventType, | ||
 *   processPartialResponseSync, | ||
 * } from '@aws-lambda-powertools/batch'; | ||
 * import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
 * | ||
 * const processor = new BatchProcessorSync(EventType.SQS); | ||
 * | ||
 * const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
 * When working with SQS FIFO queues, we will stop processing at the first failure | ||
 * and mark the remaining messages as failed to preserve ordering. However, if you want | ||
 * to keep processing messages that belong to other message groups, you can enable the | ||
 * `skipGroupOnError` option so that only the failing message group is short-circuited. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* SqsFifoPartialProcessor, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new SqsFifoPartialProcessor(); | ||
* | ||
 * const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* skipGroupOnError: true | ||
* }); | ||
* ``` | ||
* | ||
* @param event The event object containing the batch of records | ||
* @param recordHandler Sync function to process each record from the batch | ||
* @param processor Batch processor instance to handle the batch processing | ||
* @param options Batch processing options, which can vary with chosen batch processor implementation | ||
*/ | ||
declare const processPartialResponseSync: (event: { | ||
declare const processPartialResponseSync: <T extends BasePartialBatchProcessor>(event: { | ||
Records: BaseRecord[]; | ||
}, recordHandler: CallableFunction, processor: BasePartialBatchProcessor, options?: BatchProcessingOptions) => PartialItemFailureResponse; | ||
}, recordHandler: CallableFunction, processor: T, options?: BatchProcessingOptions<T>) => PartialItemFailureResponse; | ||
export { processPartialResponseSync }; | ||
//# sourceMappingURL=processPartialResponseSync.d.ts.map |
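The signature change from a plain `BasePartialBatchProcessor` parameter to a generic `<T extends BasePartialBatchProcessor>` is what lets `skipGroupOnError` type-check only for the FIFO processor. A hedged compile-time sketch (the event and handler are stand-ins, not package code):

```typescript
import {
  BatchProcessorSync,
  EventType,
  SqsFifoPartialProcessor,
  processPartialResponseSync,
} from '@aws-lambda-powertools/batch';
import type { SQSEvent } from 'aws-lambda';

declare const event: SQSEvent; // stand-in event for illustration
const noop = (): void => {};

// OK: T is inferred as SqsFifoPartialProcessor, so skipGroupOnError resolves to boolean.
processPartialResponseSync(event, noop, new SqsFifoPartialProcessor(), {
  skipGroupOnError: true,
});

// Compile-time error if uncommented: for any other processor the option resolves to `never`.
// processPartialResponseSync(event, noop, new BatchProcessorSync(EventType.SQS), {
//   skipGroupOnError: true,
// });
```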
import { UnexpectedBatchTypeError } from './errors.js'; | ||
/** | ||
* Higher level function to handle batch event processing | ||
* @param event Lambda's original event | ||
* @param recordHandler Callable function to process each record from the batch | ||
* @param processor Batch processor to handle partial failure cases | ||
* @returns Lambda Partial Batch Response | ||
* Higher level function to process a batch of records synchronously | ||
* and handle partial failure cases. | ||
* | ||
* This function is intended to be used within synchronous Lambda handlers | ||
* and together with a batch processor that implements the {@link BasePartialBatchProcessor} | ||
* interface. | ||
* | ||
* It accepts a batch of records, a record handler function, a batch processor, | ||
* and an optional set of options to configure the batch processing. | ||
* | ||
* By default, the function will process the batch of records synchronously | ||
* and in sequence. If you need to process the records asynchronously, you can | ||
* use the {@link processPartialResponse} function instead. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
 *   BatchProcessorSync, | ||
 *   EventType, | ||
 *   processPartialResponseSync, | ||
 * } from '@aws-lambda-powertools/batch'; | ||
 * import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
 * | ||
 * const processor = new BatchProcessorSync(EventType.SQS); | ||
 * | ||
 * const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
* | ||
 * When working with SQS FIFO queues, we will stop processing at the first failure | ||
 * and mark the remaining messages as failed to preserve ordering. However, if you want | ||
 * to keep processing messages that belong to other message groups, you can enable the | ||
 * `skipGroupOnError` option so that only the failing message group is short-circuited. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
* SqsFifoPartialProcessor, | ||
* processPartialResponseSync, | ||
* } from '@aws-lambda-powertools/batch'; | ||
* import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
* | ||
* const processor = new SqsFifoPartialProcessor(); | ||
* | ||
 * const recordHandler = (record: SQSRecord): void => { | ||
* const payload = JSON.parse(record.body); | ||
* }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* skipGroupOnError: true | ||
* }); | ||
* ``` | ||
* | ||
* @param event The event object containing the batch of records | ||
* @param recordHandler Sync function to process each record from the batch | ||
* @param processor Batch processor instance to handle the batch processing | ||
* @param options Batch processing options, which can vary with chosen batch processor implementation | ||
*/ | ||
@@ -9,0 +69,0 @@ const processPartialResponseSync = (event, recordHandler, processor, options) => { |
import { BatchProcessorSync } from './BatchProcessorSync.js'; | ||
import type { FailureResponse, SuccessResponse } from './types.js'; | ||
import type { EventSourceDataClassTypes, FailureResponse, SuccessResponse } from './types.js'; | ||
/** | ||
* Process native partial responses from SQS FIFO queues | ||
* Stops processing records when the first record fails | ||
* The remaining records are reported as failed items | ||
* Batch processor for SQS FIFO queues | ||
* | ||
* This class extends the {@link BatchProcessorSync} class and provides | ||
* a mechanism to process records from SQS FIFO queues synchronously. | ||
* | ||
 * By default, we will stop processing at the first failure and mark unprocessed messages as failed to preserve ordering. | ||
 * | ||
 * If you need to keep processing messages from other message groups instead, set the `skipGroupOnError` option to `true`: only the message group that contains the failure is then skipped. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
 *   SqsFifoPartialProcessor, | ||
 *   processPartialResponseSync, | ||
 * } from '@aws-lambda-powertools/batch'; | ||
 * import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
 * | ||
 * const processor = new SqsFifoPartialProcessor(); | ||
 * | ||
 * const recordHandler = (record: SQSRecord): void => { | ||
 *   const payload = JSON.parse(record.body); | ||
 * }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
*/ | ||
declare class SqsFifoPartialProcessor extends BatchProcessorSync { | ||
#private; | ||
constructor(); | ||
/** | ||
* Call instance's handler for each record. | ||
* When the first failed message is detected, the process is short-circuited | ||
* And the remaining messages are reported as failed items | ||
* Handles a failure for a given record. | ||
* Adds the current group ID to the set of failed group IDs if `skipGroupOnError` is true. | ||
* @param record - The record that failed. | ||
* @param exception - The error that occurred. | ||
* @returns The failure response. | ||
*/ | ||
failureHandler(record: EventSourceDataClassTypes, exception: Error): FailureResponse; | ||
/** | ||
 * Process all records in the batch with a synchronous handler | ||
* | ||
* This method orchestrates the processing of a batch of records synchronously | ||
* for SQS FIFO queues. | ||
* | ||
* The method calls the prepare hook to initialize the processor and then | ||
* iterates over each record in the batch, processing them one by one. | ||
* | ||
* If one of them fails and `skipGroupOnError` is not true, the method short circuits | ||
* the processing and fails the remaining records in the batch. | ||
* | ||
* If one of them fails and `skipGroupOnError` is true, then the method fails the current record | ||
* if the message group has any previous failure, otherwise keeps processing. | ||
* | ||
* Then, it calls the clean hook to clean up the processor and returns the | ||
* processed records. | ||
*/ | ||
processSync(): (SuccessResponse | FailureResponse)[]; | ||
/** | ||
* Starting from the first failure index, fail all remaining messages and append them to the result list | ||
* Starting from the first failure index, fail all remaining messages regardless | ||
* of their group ID. | ||
* | ||
* This short circuit mechanism is used when we detect a failed message in the batch. | ||
* | ||
* Since messages in a FIFO queue are processed in order, we must stop processing any | ||
* remaining messages in the batch to prevent out-of-order processing. | ||
* | ||
* @param firstFailureIndex Index of first message that failed | ||
* @param result List of success and failure responses with remaining messages failed | ||
 * @param processedRecords Array of response items for the records processed so far, both successful and failed | ||
*/ | ||
shortCircuitProcessing(firstFailureIndex: number, processedRecords: (SuccessResponse | FailureResponse)[]): (SuccessResponse | FailureResponse)[]; | ||
protected shortCircuitProcessing(firstFailureIndex: number, processedRecords: (SuccessResponse | FailureResponse)[]): (SuccessResponse | FailureResponse)[]; | ||
} | ||
export { SqsFifoPartialProcessor }; | ||
//# sourceMappingURL=SqsFifoPartialProcessor.d.ts.map |
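Note that `shortCircuitProcessing` is now `protected`: it disappears from the public surface but remains overridable. A minimal sketch of a hypothetical subclass that adds logging before delegating to the base behavior (the class name and logging are assumptions, not package code):

```typescript
import { SqsFifoPartialProcessor } from '@aws-lambda-powertools/batch';

// Reuse the method's own return type to avoid importing the internal tuple types.
type ProcessedRecords = ReturnType<SqsFifoPartialProcessor['processSync']>;

class LoggingSqsFifoProcessor extends SqsFifoPartialProcessor {
  protected shortCircuitProcessing(
    firstFailureIndex: number,
    processedRecords: ProcessedRecords
  ): ProcessedRecords {
    console.warn(`Short-circuiting FIFO batch at index ${firstFailureIndex}`);
    return super.shortCircuitProcessing(firstFailureIndex, processedRecords);
  }
}
```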
@@ -0,8 +1,46 @@ | ||
var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) { | ||
if (kind === "m") throw new TypeError("Private method is not writable"); | ||
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter"); | ||
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it"); | ||
return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value; | ||
}; | ||
var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) { | ||
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter"); | ||
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it"); | ||
return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver); | ||
}; | ||
var _SqsFifoPartialProcessor_instances, _SqsFifoPartialProcessor_currentGroupId, _SqsFifoPartialProcessor_failedGroupIds, _SqsFifoPartialProcessor_addToFailedGroup, _SqsFifoPartialProcessor_processFailRecord, _SqsFifoPartialProcessor_setCurrentGroup; | ||
import { BatchProcessorSync } from './BatchProcessorSync.js'; | ||
import { EventType } from './constants.js'; | ||
import { SqsFifoShortCircuitError } from './errors.js'; | ||
import { SqsFifoMessageGroupShortCircuitError, SqsFifoShortCircuitError, } from './errors.js'; | ||
/** | ||
* Process native partial responses from SQS FIFO queues | ||
* Stops processing records when the first record fails | ||
* The remaining records are reported as failed items | ||
* Batch processor for SQS FIFO queues | ||
* | ||
* This class extends the {@link BatchProcessorSync} class and provides | ||
* a mechanism to process records from SQS FIFO queues synchronously. | ||
* | ||
 * By default, we will stop processing at the first failure and mark unprocessed messages as failed to preserve ordering. | ||
 * | ||
 * If you need to keep processing messages from other message groups instead, set the `skipGroupOnError` option to `true`: only the message group that contains the failure is then skipped. | ||
* | ||
* @example | ||
* ```typescript | ||
* import { | ||
 *   SqsFifoPartialProcessor, | ||
 *   processPartialResponseSync, | ||
 * } from '@aws-lambda-powertools/batch'; | ||
 * import type { SQSRecord, SQSHandler } from 'aws-lambda'; | ||
 * | ||
 * const processor = new SqsFifoPartialProcessor(); | ||
 * | ||
 * const recordHandler = (record: SQSRecord): void => { | ||
 *   const payload = JSON.parse(record.body); | ||
 * }; | ||
* | ||
* export const handler: SQSHandler = async (event, context) => | ||
* processPartialResponseSync(event, recordHandler, processor, { | ||
* context, | ||
* }); | ||
* ``` | ||
*/ | ||
@@ -12,8 +50,44 @@ class SqsFifoPartialProcessor extends BatchProcessorSync { | ||
super(EventType.SQS); | ||
_SqsFifoPartialProcessor_instances.add(this); | ||
/** | ||
* The ID of the current message group being processed. | ||
*/ | ||
_SqsFifoPartialProcessor_currentGroupId.set(this, void 0); | ||
/** | ||
* A set of group IDs that have already encountered failures. | ||
*/ | ||
_SqsFifoPartialProcessor_failedGroupIds.set(this, void 0); | ||
__classPrivateFieldSet(this, _SqsFifoPartialProcessor_failedGroupIds, new Set(), "f"); | ||
} | ||
/** | ||
* Call instance's handler for each record. | ||
* When the first failed message is detected, the process is short-circuited | ||
* And the remaining messages are reported as failed items | ||
* Handles a failure for a given record. | ||
* Adds the current group ID to the set of failed group IDs if `skipGroupOnError` is true. | ||
* @param record - The record that failed. | ||
* @param exception - The error that occurred. | ||
* @returns The failure response. | ||
*/ | ||
failureHandler(record, exception) { | ||
if (this.options?.skipGroupOnError && __classPrivateFieldGet(this, _SqsFifoPartialProcessor_currentGroupId, "f")) { | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_instances, "m", _SqsFifoPartialProcessor_addToFailedGroup).call(this, __classPrivateFieldGet(this, _SqsFifoPartialProcessor_currentGroupId, "f")); | ||
} | ||
return super.failureHandler(record, exception); | ||
} | ||
/** | ||
 * Process all records in the batch with a synchronous handler | ||
* | ||
* This method orchestrates the processing of a batch of records synchronously | ||
* for SQS FIFO queues. | ||
* | ||
* The method calls the prepare hook to initialize the processor and then | ||
* iterates over each record in the batch, processing them one by one. | ||
* | ||
* If one of them fails and `skipGroupOnError` is not true, the method short circuits | ||
* the processing and fails the remaining records in the batch. | ||
* | ||
* If one of them fails and `skipGroupOnError` is true, then the method fails the current record | ||
* if the message group has any previous failure, otherwise keeps processing. | ||
* | ||
* Then, it calls the clean hook to clean up the processor and returns the | ||
* processed records. | ||
*/ | ||
processSync() { | ||
@@ -24,8 +98,18 @@ this.prepare(); | ||
for (const record of this.records) { | ||
// If we have any failed messages, it means the last message failed | ||
// We should then short circuit the process and fail remaining messages | ||
if (this.failureMessages.length != 0) { | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_instances, "m", _SqsFifoPartialProcessor_setCurrentGroup).call(this, record.attributes?.MessageGroupId); | ||
// If we have any failed messages, we should then short circuit the process and | ||
// fail remaining messages unless `skipGroupOnError` is true | ||
const shouldShortCircuit = !this.options?.skipGroupOnError && this.failureMessages.length !== 0; | ||
if (shouldShortCircuit) { | ||
return this.shortCircuitProcessing(currentIndex, processedRecords); | ||
} | ||
processedRecords.push(this.processRecordSync(record)); | ||
// If `skipGroupOnError` is true and the current group has previously failed, | ||
// then we should skip processing the current group. | ||
const shouldSkipCurrentGroup = this.options?.skipGroupOnError && | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_currentGroupId, "f") && | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_failedGroupIds, "f").has(__classPrivateFieldGet(this, _SqsFifoPartialProcessor_currentGroupId, "f")); | ||
const result = shouldSkipCurrentGroup | ||
? __classPrivateFieldGet(this, _SqsFifoPartialProcessor_instances, "m", _SqsFifoPartialProcessor_processFailRecord).call(this, record, new SqsFifoMessageGroupShortCircuitError()) | ||
: this.processRecordSync(record); | ||
processedRecords.push(result); | ||
currentIndex++; | ||
@@ -37,5 +121,12 @@ } | ||
/** | ||
* Starting from the first failure index, fail all remaining messages and append them to the result list | ||
* Starting from the first failure index, fail all remaining messages regardless | ||
* of their group ID. | ||
* | ||
* This short circuit mechanism is used when we detect a failed message in the batch. | ||
* | ||
* Since messages in a FIFO queue are processed in order, we must stop processing any | ||
* remaining messages in the batch to prevent out-of-order processing. | ||
* | ||
* @param firstFailureIndex Index of first message that failed | ||
* @param result List of success and failure responses with remaining messages failed | ||
 * @param processedRecords Array of response items for the records processed so far, both successful and failed | ||
*/ | ||
@@ -45,4 +136,3 @@ shortCircuitProcessing(firstFailureIndex, processedRecords) { | ||
for (const record of remainingRecords) { | ||
const data = this.toBatchType(record, this.eventType); | ||
processedRecords.push(this.failureHandler(data, new SqsFifoShortCircuitError())); | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_instances, "m", _SqsFifoPartialProcessor_processFailRecord).call(this, record, new SqsFifoShortCircuitError()); | ||
} | ||
@@ -53,2 +143,10 @@ this.clean(); | ||
} | ||
_SqsFifoPartialProcessor_currentGroupId = new WeakMap(), _SqsFifoPartialProcessor_failedGroupIds = new WeakMap(), _SqsFifoPartialProcessor_instances = new WeakSet(), _SqsFifoPartialProcessor_addToFailedGroup = function _SqsFifoPartialProcessor_addToFailedGroup(group) { | ||
__classPrivateFieldGet(this, _SqsFifoPartialProcessor_failedGroupIds, "f").add(group); | ||
}, _SqsFifoPartialProcessor_processFailRecord = function _SqsFifoPartialProcessor_processFailRecord(record, exception) { | ||
const data = this.toBatchType(record, this.eventType); | ||
return this.failureHandler(data, exception); | ||
}, _SqsFifoPartialProcessor_setCurrentGroup = function _SqsFifoPartialProcessor_setCurrentGroup(group) { | ||
__classPrivateFieldSet(this, _SqsFifoPartialProcessor_currentGroupId, group, "f"); | ||
}; | ||
export { SqsFifoPartialProcessor }; |
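To make the new control flow concrete, here is a small self-contained sketch of a two-group FIFO batch with `skipGroupOnError` enabled; the records are abridged stubs that only carry the fields the processor reads (`messageId`, `body`, `attributes.MessageGroupId`), and the IDs are hypothetical:

```typescript
import {
  SqsFifoPartialProcessor,
  processPartialResponseSync,
} from '@aws-lambda-powertools/batch';
import type { SQSRecord } from 'aws-lambda';

// Abridged record stub: only the fields used by the processor are populated.
const makeRecord = (messageId: string, groupId: string, body: string): SQSRecord =>
  ({
    messageId,
    body,
    attributes: { MessageGroupId: groupId },
  }) as unknown as SQSRecord;

const event = {
  Records: [
    makeRecord('msg-1', 'group-A', 'not-json'), // fails: invalid JSON
    makeRecord('msg-2', 'group-A', '{}'), // skipped: group-A already failed
    makeRecord('msg-3', 'group-B', '{}'), // processed: different group
  ],
};

const recordHandler = (record: SQSRecord): void => {
  JSON.parse(record.body);
};

const response = processPartialResponseSync(
  event,
  recordHandler,
  new SqsFifoPartialProcessor(),
  { skipGroupOnError: true }
);

// Expected: msg-1 (handler error) and msg-2 (group short-circuit) are reported,
// while msg-3 from group-B is processed successfully.
console.log(response.batchItemFailures);
```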
import type { Context, DynamoDBRecord, KinesisStreamRecord, SQSRecord } from 'aws-lambda'; | ||
type BatchProcessingOptions = { | ||
context: Context; | ||
import { SqsFifoPartialProcessor } from './SqsFifoPartialProcessor.js'; | ||
import { BasePartialBatchProcessor } from './BasePartialBatchProcessor.js'; | ||
/** | ||
* Options for batch processing | ||
* | ||
* @template T The type of the batch processor, defaults to BasePartialBatchProcessor | ||
* @property context The context object provided by the AWS Lambda runtime | ||
 * @property skipGroupOnError Whether to skip the remaining messages in a message group once one of them fails (only for SqsFifoPartialProcessor) | ||
*/ | ||
type BatchProcessingOptions<T = BasePartialBatchProcessor> = { | ||
/** | ||
* The context object provided by the AWS Lambda runtime. When provided, | ||
* it's made available to the handler function you specify | ||
*/ | ||
context?: Context; | ||
/** | ||
     * This option is only available for SqsFifoPartialProcessor. | ||
     * If true, skip the remaining messages in a message group after one of them fails during processing. | ||
*/ | ||
skipGroupOnError?: T extends SqsFifoPartialProcessor ? boolean : never; | ||
}; | ||
/** | ||
* The types of data that can be provided by an event source | ||
*/ | ||
type EventSourceDataClassTypes = SQSRecord | KinesisStreamRecord | DynamoDBRecord; | ||
type RecordValue = unknown; | ||
/** | ||
* Type representing a record from an event source | ||
*/ | ||
type BaseRecord = { | ||
[key: string]: RecordValue; | ||
[key: string]: unknown; | ||
} | EventSourceDataClassTypes; | ||
type ResultType = unknown; | ||
type SuccessResponse = ['success', ResultType, EventSourceDataClassTypes]; | ||
/** | ||
* Type representing a successful response | ||
* | ||
* The first element is the string literal 'success', | ||
* the second element is the result of the handler function, | ||
* and the third element is the type of data provided by the event source | ||
*/ | ||
type SuccessResponse = ['success', unknown, EventSourceDataClassTypes]; | ||
/** | ||
* Type representing a failure response | ||
* | ||
* The first element is the string literal 'fail', | ||
* the second element is the error message, | ||
* and the third element is the type of data provided by the event source | ||
*/ | ||
type FailureResponse = ['fail', string, EventSourceDataClassTypes]; | ||
/** | ||
* Type representing a partial failure response | ||
*/ | ||
type PartialItemFailures = { | ||
itemIdentifier: string; | ||
}; | ||
/** | ||
* Type representing a partial failure response | ||
*/ | ||
type PartialItemFailureResponse = { | ||
batchItemFailures: PartialItemFailures[]; | ||
}; | ||
export type { BatchProcessingOptions, BaseRecord, EventSourceDataClassTypes, ResultType, SuccessResponse, FailureResponse, PartialItemFailures, PartialItemFailureResponse, }; | ||
export type { BatchProcessingOptions, BaseRecord, EventSourceDataClassTypes, SuccessResponse, FailureResponse, PartialItemFailures, PartialItemFailureResponse, }; | ||
//# sourceMappingURL=types.d.ts.map |
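To see how the conditional `skipGroupOnError` property resolves, a self-contained type-level sketch using local stand-ins that mirror the declarations above (the stub classes are illustrative; the real classes carry more members):

```typescript
import type { Context } from 'aws-lambda';

// Local stand-ins mirroring the package's classes, for illustration only.
class BasePartialBatchProcessorStub {
  protected batch: unknown[] = [];
}
class SqsFifoPartialProcessorStub extends BasePartialBatchProcessorStub {
  // Distinguishing member so structural typing can tell the two classes apart.
  protected readonly fifo = true;
}

type BatchProcessingOptions<T = BasePartialBatchProcessorStub> = {
  context?: Context;
  skipGroupOnError?: T extends SqsFifoPartialProcessorStub ? boolean : never;
};

// Resolves to `boolean`, so this compiles:
const fifoOptions: BatchProcessingOptions<SqsFifoPartialProcessorStub> = {
  skipGroupOnError: true,
};

// Resolves to `never`; adding `skipGroupOnError: true` here would be a compile-time error:
const defaultOptions: BatchProcessingOptions = {};
```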
{ | ||
"name": "@aws-lambda-powertools/batch", | ||
"version": "2.1.1", | ||
"version": "2.2.0", | ||
"description": "The batch processing package for the Powertools for AWS Lambda (TypeScript) library.", | ||
@@ -5,0 +5,0 @@ "author": { |
README.md
@@ -1,35 +0,11 @@ | ||
# Powertools for AWS Lambda (TypeScript) - Batch Processing Utility <!-- omit in toc --> | ||
# Powertools for AWS Lambda (TypeScript) - Batch Processing Utility | ||
Powertools for AWS Lambda (TypeScript) is a developer toolkit to implement Serverless [best practices and increase developer velocity](https://docs.powertools.aws.dev/lambda/typescript/latest/#features). | ||
You can use the package in both TypeScript and JavaScript code bases. | ||
You can use the library in both TypeScript and JavaScript code bases. | ||
- [Intro](#intro) | ||
- [Key features](#key-features) | ||
- [Usage](#usage) | ||
- [Batch Processor](#batch-processor) | ||
- [SQS Processor](#sqs-processor) | ||
- [Kinesis Processor](#kinesis-processor) | ||
- [DynamoDB Streams Processor](#dynamodb-streams-processor) | ||
- [Async processing](#async-processing) | ||
- [Contribute](#contribute) | ||
- [Roadmap](#roadmap) | ||
- [Connect](#connect) | ||
- [How to support Powertools for AWS Lambda (TypeScript)?](#how-to-support-powertools-for-aws-lambda-typescript) | ||
- [Becoming a reference customer](#becoming-a-reference-customer) | ||
- [Sharing your work](#sharing-your-work) | ||
- [Using Lambda Layer](#using-lambda-layer) | ||
- [Credits](#credits) | ||
- [License](#license) | ||
## Intro | ||
The batch processing utility handles partial failures when processing batches from Amazon SQS, Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
The Batch Processing utility handles partial failures when processing batches from Amazon SQS, Amazon Kinesis Data Streams, and Amazon DynamoDB Streams. | ||
## Key features | ||
* Reports batch item failures to reduce the number of retries for a record upon errors | ||
* Simple interface to process each batch record | ||
* Build your own batch processor by extending primitives | ||
## Usage | ||
@@ -40,3 +16,3 @@ | ||
```sh | ||
npm install @aws-lambda-powertools/batch | ||
npm i @aws-lambda-powertools/batch | ||
``` | ||
@@ -63,8 +39,3 @@ | ||
import { Logger } from '@aws-lambda-powertools/logger'; | ||
import type { | ||
SQSEvent, | ||
SQSRecord, | ||
Context, | ||
SQSBatchResponse, | ||
} from 'aws-lambda'; | ||
import type { SQSHandler, SQSRecord } from 'aws-lambda'; | ||
@@ -82,11 +53,6 @@ const processor = new BatchProcessorSync(EventType.SQS); | ||
export const handler = async ( | ||
event: SQSEvent, | ||
context: Context | ||
): Promise<SQSBatchResponse> => { | ||
return processPartialResponseSync(event, recordHandler, processor, { | ||
export const handler: SQSHandler = async (event, context) => | ||
processPartialResponseSync(event, recordHandler, processor, { | ||
context, | ||
}); | ||
}; | ||
export { processor }; | ||
``` | ||
@@ -105,8 +71,3 @@ | ||
import { Logger } from '@aws-lambda-powertools/logger'; | ||
import type { | ||
KinesisStreamEvent, | ||
KinesisStreamRecord, | ||
Context, | ||
KinesisStreamBatchResponse, | ||
} from 'aws-lambda'; | ||
import type { KinesisStreamHandler, KinesisStreamRecord } from 'aws-lambda'; | ||
@@ -122,10 +83,6 @@ const processor = new BatchProcessorSync(EventType.KinesisDataStreams); | ||
export const handler = async ( | ||
event: KinesisStreamEvent, | ||
context: Context | ||
): Promise<KinesisStreamBatchResponse> => { | ||
return processPartialResponseSync(event, recordHandler, processor, { | ||
export const handler: KinesisStreamHandler = async (event, context) => | ||
processPartialResponseSync(event, recordHandler, processor, { | ||
context, | ||
}); | ||
}; | ||
``` | ||
@@ -139,3 +96,3 @@ | ||
import { | ||
BatchProcessorSync, | ||
BatchProcessor, | ||
EventType, | ||
@@ -145,10 +102,5 @@ processPartialResponseSync, | ||
import { Logger } from '@aws-lambda-powertools/logger'; | ||
import type { | ||
DynamoDBStreamEvent, | ||
DynamoDBRecord, | ||
Context, | ||
DynamoDBBatchResponse, | ||
} from 'aws-lambda'; | ||
import type { DynamoDBRecord, DynamoDBStreamHandler } from 'aws-lambda'; | ||
const processor = new BatchProcessorSync(EventType.DynamoDBStreams); | ||
const processor = new BatchProcessor(EventType.DynamoDBStreams); // (1)! | ||
const logger = new Logger(); | ||
@@ -167,10 +119,6 @@ | ||
export const handler = async ( | ||
event: DynamoDBStreamEvent, | ||
context: Context | ||
): Promise<DynamoDBBatchResponse> => { | ||
return processPartialResponseSync(event, recordHandler, processor, { | ||
export const handler: DynamoDBStreamHandler = async (event, context) => | ||
processPartialResponseSync(event, recordHandler, processor, { | ||
context, | ||
}); | ||
}; | ||
``` | ||
@@ -188,8 +136,3 @@ | ||
} from '@aws-lambda-powertools/batch'; | ||
import type { | ||
SQSEvent, | ||
SQSRecord, | ||
Context, | ||
SQSBatchResponse, | ||
} from 'aws-lambda'; | ||
import type { SQSHandler, SQSRecord } from 'aws-lambda'; | ||
@@ -206,10 +149,6 @@ const processor = new BatchProcessor(EventType.SQS); | ||
export const handler = async ( | ||
event: SQSEvent, | ||
context: Context | ||
): Promise<SQSBatchResponse> => { | ||
return await processPartialResponse(event, recordHandler, processor, { | ||
export const handler: SQSHandler = async (event, context) => | ||
await processPartialResponse(event, recordHandler, processor, { | ||
context, | ||
}); | ||
}; | ||
``` | ||
@@ -230,4 +169,4 @@ | ||
* **Powertools for AWS Lambda on Discord**: `#typescript` - **[Invite link](https://discord.gg/B8zZKbbyET)** | ||
* **Email**: aws-lambda-powertools-feedback@amazon.com | ||
- **Powertools for AWS Lambda on Discord**: `#typescript` - **[Invite link](https://discord.gg/B8zZKbbyET)** | ||
- **Email**: <aws-lambda-powertools-feedback@amazon.com> | ||
@@ -238,17 +177,17 @@ ## How to support Powertools for AWS Lambda (TypeScript)? | ||
Knowing which companies are using this library is important to help prioritize the project internally. If your company is using Powertools for AWS Lambda (TypeScript), you can request to have your name and logo added to the README file by raising a [Support Powertools for AWS Lambda (TypeScript) (become a reference)](https://github.com/aws-powertools/powertools-lambda-typescript/issues/new?assignees=&labels=customer-reference&template=support_powertools.yml&title=%5BSupport+Lambda+Powertools%5D%3A+%3Cyour+organization+name%3E) issue. | ||
Knowing which companies are using this library is important to help prioritize the project internally. If your company is using Powertools for AWS Lambda (TypeScript), you can request to have your name and logo added to the README file by raising a [Support Powertools for AWS Lambda (TypeScript) (become a reference)](https://s12d.com/become-reference-pt-ts) issue. | ||
The following companies, among others, use Powertools: | ||
* [Hashnode](https://hashnode.com/) | ||
* [Trek10](https://www.trek10.com/) | ||
* [Elva](https://elva-group.com) | ||
* [globaldatanet](https://globaldatanet.com/) | ||
* [Bailey Nelson](https://www.baileynelson.com.au) | ||
* [Perfect Post](https://www.perfectpost.fr) | ||
* [Sennder](https://sennder.com/) | ||
* [Certible](https://www.certible.com/) | ||
* [tecRacer GmbH & Co. KG](https://www.tecracer.com/) | ||
* [AppYourself](https://appyourself.net) | ||
* [Alma Media](https://www.almamedia.fi) | ||
- [Hashnode](https://hashnode.com/) | ||
- [Trek10](https://www.trek10.com/) | ||
- [Elva](https://elva-group.com) | ||
- [globaldatanet](https://globaldatanet.com/) | ||
- [Bailey Nelson](https://www.baileynelson.com.au) | ||
- [Perfect Post](https://www.perfectpost.fr) | ||
- [Sennder](https://sennder.com/) | ||
- [Certible](https://www.certible.com/) | ||
- [tecRacer GmbH & Co. KG](https://www.tecracer.com/) | ||
- [AppYourself](https://appyourself.net) | ||
- [Alma Media](https://www.almamedia.fi) | ||
@@ -261,10 +200,6 @@ ### Sharing your work | ||
This helps us understand who uses Powertools for AWS Lambda (TypeScript) in a non-intrusive way, and helps us gain future investments for other Powertools for AWS Lambda languages. When [using Layers](https://docs.powertools.aws.dev/lambda/typescript/latest/#lambda-layer), you can add Powertools as a dev dependency (or as part of your virtual env) to not impact the development process. | ||
This helps us understand who uses Powertools for AWS Lambda (TypeScript) in a non-intrusive way, and helps us gain future investments for other Powertools for AWS Lambda languages. When [using Layers](https://docs.powertools.aws.dev/lambda/typescript/latest/#lambda-layer), you can add Powertools as a dev dependency to not impact the development process. | ||
## Credits | ||
Credits for the Powertools for AWS Lambda (TypeScript) idea go to [DAZN](https://github.com/getndazn) and their [DAZN Lambda Powertools](https://github.com/getndazn/dazn-lambda-powertools/). | ||
## License | ||
This library is licensed under the MIT-0 License. See the LICENSE file. |
Major refactor
Supply chain risk: this package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package