@aws-sdk/client-firehose - npm package version comparison

Comparing version 3.687.0 to 3.688.0


dist-es/models/models_0.js

@@ -73,2 +73,10 @@ import { SENSITIVE_STRING } from "@smithy/smithy-client";

};
export const SSLMode = {
Disabled: "Disabled",
Enabled: "Enabled",
};
export const DatabaseType = {
MySQL: "MySQL",
PostgreSQL: "PostgreSQL",
};
export const KeyType = {

@@ -79,2 +87,3 @@ AWS_OWNED_CMK: "AWS_OWNED_CMK",

export const DeliveryStreamType = {
DatabaseAsSource: "DatabaseAsSource",
DirectPut: "DirectPut",

@@ -195,14 +204,2 @@ KinesisStreamAsSource: "KinesisStreamAsSource",

}
export class ResourceNotFoundException extends __BaseException {
constructor(opts) {
super({
name: "ResourceNotFoundException",
$fault: "client",
...opts,
});
this.name = "ResourceNotFoundException";
this.$fault = "client";
Object.setPrototypeOf(this, ResourceNotFoundException.prototype);
}
}
export const DeliveryStreamFailureType = {

@@ -224,3 +221,26 @@ CREATE_ENI_FAILED: "CREATE_ENI_FAILED",

UNKNOWN_ERROR: "UNKNOWN_ERROR",
VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND: "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND",
VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED: "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED",
};
export const SnapshotRequestedBy = {
FIREHOSE: "FIREHOSE",
USER: "USER",
};
export const SnapshotStatus = {
COMPLETE: "COMPLETE",
IN_PROGRESS: "IN_PROGRESS",
SUSPENDED: "SUSPENDED",
};
export class ResourceNotFoundException extends __BaseException {
constructor(opts) {
super({
name: "ResourceNotFoundException",
$fault: "client",
...opts,
});
this.name = "ResourceNotFoundException";
this.$fault = "client";
Object.setPrototypeOf(this, ResourceNotFoundException.prototype);
}
}
export const DeliveryStreamEncryptionStatus = {

@@ -387,5 +407,2 @@ DISABLED: "DISABLED",

...obj,
...(obj.DeliveryStreamDescription && {
DeliveryStreamDescription: DeliveryStreamDescriptionFilterSensitiveLog(obj.DeliveryStreamDescription),
}),
});

@@ -392,0 +409,0 @@ export const HttpEndpointDestinationUpdateFilterSensitiveLog = (obj) => ({
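The new DatabaseType and SSLMode values above follow the SDK's usual const-object "enum" pattern: a plain runtime map in dist-es, paired with a derived string-literal union type in the typings (visible in the .d.ts diff further down). A minimal standalone TypeScript sketch of the pattern, for readers unfamiliar with it:

// The same shape the SDK emits for DatabaseType/SSLMode: the object's
// values double as a string-literal union type of the same name.
export const DatabaseType = {
  MySQL: "MySQL",
  PostgreSQL: "PostgreSQL",
} as const;
export type DatabaseType = (typeof DatabaseType)[keyof typeof DatabaseType];

const ok: DatabaseType = DatabaseType.PostgreSQL; // "PostgreSQL"
// const bad: DatabaseType = "Oracle"; // compile error: not in the union

This keeps the values available at runtime (unlike a type-only union) without the overhead of a TypeScript enum.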

@@ -350,2 +350,3 @@ import { loadRestJsonErrorCode, parseJsonBody as parseBody, parseJsonErrorBody as parseErrorBody } from "@aws-sdk/core";

AmazonopensearchserviceDestinationConfiguration: _json,
DatabaseSourceConfiguration: _json,
DeliveryStreamEncryptionConfigurationInput: _json,

@@ -487,2 +488,36 @@ DeliveryStreamName: [],

};
const de_DatabaseSnapshotInfo = (output, context) => {
return take(output, {
FailureDescription: _json,
Id: __expectString,
RequestTimestamp: (_) => __expectNonNull(__parseEpochTimestamp(__expectNumber(_))),
RequestedBy: __expectString,
Status: __expectString,
Table: __expectString,
});
};
const de_DatabaseSnapshotInfoList = (output, context) => {
const retVal = (output || [])
.filter((e) => e != null)
.map((entry) => {
return de_DatabaseSnapshotInfo(entry, context);
});
return retVal;
};
const de_DatabaseSourceDescription = (output, context) => {
return take(output, {
Columns: _json,
DatabaseSourceAuthenticationConfiguration: _json,
DatabaseSourceVPCConfiguration: _json,
Databases: _json,
Endpoint: __expectString,
Port: __expectInt32,
SSLMode: __expectString,
SnapshotInfo: (_) => de_DatabaseSnapshotInfoList(_, context),
SnapshotWatermarkTable: __expectString,
SurrogateKeys: _json,
Tables: _json,
Type: __expectString,
});
};
const de_DataFormatConversionConfiguration = (output, context) => {

@@ -602,2 +637,3 @@ return take(output, {

return take(output, {
DatabaseSourceDescription: (_) => de_DatabaseSourceDescription(_, context),
KinesisStreamSourceDescription: (_) => de_KinesisStreamSourceDescription(_, context),

@@ -604,0 +640,0 @@ MSKSourceDescription: (_) => de_MSKSourceDescription(_, context),
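These deserializers lean on the take helper from @smithy/smithy-client plus per-field parsers such as __expectString and __parseEpochTimestamp. A simplified, self-contained illustration of the idiom (stand-in implementations, not the actual smithy-client code):

// take() applies a per-key mapper to whichever keys are present on the
// wire object; epoch-second timestamps become Date objects, as in
// de_DatabaseSnapshotInfo above.
type Mapper = (value: unknown) => unknown;

const take = (output: Record<string, unknown>, instructions: Record<string, Mapper>) => {
  const result: Record<string, unknown> = {};
  for (const [key, fn] of Object.entries(instructions)) {
    if (output[key] != null) result[key] = fn(output[key]);
  }
  return result;
};

const parseEpochTimestamp = (secs: number) => new Date(secs * 1000);

const wire = { Id: "snap-1", RequestTimestamp: 1731024000, Status: "COMPLETE" };
const parsed = take(wire, {
  Id: (v) => String(v),
  RequestTimestamp: (v) => parseEpochTimestamp(Number(v)),
  Status: (v) => String(v),
});
console.log(parsed.RequestTimestamp instanceof Date); // true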

@@ -30,16 +30,16 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Creates a Firehose delivery stream.</p>
* <p>By default, you can create up to 50 delivery streams per Amazon Web Services
* <p>Creates a Firehose stream.</p>
* <p>By default, you can create up to 50 Firehose streams per Amazon Web Services
* Region.</p>
* <p>This is an asynchronous operation that immediately returns. The initial status of the
* delivery stream is <code>CREATING</code>. After the delivery stream is created, its status
* is <code>ACTIVE</code> and it now accepts data. If the delivery stream creation fails, the
* Firehose stream is <code>CREATING</code>. After the Firehose stream is created, its status
* is <code>ACTIVE</code> and it now accepts data. If the Firehose stream creation fails, the
* status transitions to <code>CREATING_FAILED</code>. Attempts to send data to a delivery
* stream that is not in the <code>ACTIVE</code> state cause an exception. To check the state
* of a delivery stream, use <a>DescribeDeliveryStream</a>.</p>
* <p>If the status of a delivery stream is <code>CREATING_FAILED</code>, this status
* of a Firehose stream, use <a>DescribeDeliveryStream</a>.</p>
* <p>If the status of a Firehose stream is <code>CREATING_FAILED</code>, this status
* doesn't change, and you can't invoke <code>CreateDeliveryStream</code> again on it.
* However, you can invoke the <a>DeleteDeliveryStream</a> operation to delete
* it.</p>
* <p>A Firehose delivery stream can be configured to receive records directly
* <p>A Firehose stream can be configured to receive records directly
* from providers using <a>PutRecord</a> or <a>PutRecordBatch</a>, or it

@@ -51,6 +51,6 @@ * can be configured to use an existing Kinesis stream as its source. To specify a Kinesis

* parameter.</p>
* <p>To create a delivery stream with server-side encryption (SSE) enabled, include <a>DeliveryStreamEncryptionConfigurationInput</a> in your request. This is
* <p>To create a Firehose stream with server-side encryption (SSE) enabled, include <a>DeliveryStreamEncryptionConfigurationInput</a> in your request. This is
* optional. You can also invoke <a>StartDeliveryStreamEncryption</a> to turn on
* SSE for an existing delivery stream that doesn't have SSE enabled.</p>
* <p>A delivery stream is configured with a single destination, such as Amazon Simple
* SSE for an existing Firehose stream that doesn't have SSE enabled.</p>
* <p>A Firehose stream is configured with a single destination, such as Amazon Simple
* Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch

@@ -107,3 +107,3 @@ * Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by

* DeliveryStreamName: "STRING_VALUE", // required
* DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource",
* DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource" || "DatabaseAsSource",
* KinesisStreamSourceConfiguration: { // KinesisStreamSourceConfiguration

@@ -587,5 +587,18 @@ * KinesisStreamARN: "STRING_VALUE", // required

* ],
* PartitionSpec: { // PartitionSpec
* Identity: [ // PartitionFields
* { // PartitionField
* SourceName: "STRING_VALUE", // required
* },
* ],
* },
* S3ErrorOutputPrefix: "STRING_VALUE",
* },
* ],
* SchemaEvolutionConfiguration: { // SchemaEvolutionConfiguration
* Enabled: true || false, // required
* },
* TableCreationConfiguration: { // TableCreationConfiguration
* Enabled: true || false, // required
* },
* BufferingHints: "<BufferingHints>",

@@ -601,5 +614,50 @@ * CloudWatchLoggingOptions: "<CloudWatchLoggingOptions>",

* CatalogARN: "STRING_VALUE",
* WarehouseLocation: "STRING_VALUE",
* },
* S3Configuration: "<S3DestinationConfiguration>", // required
* },
* DatabaseSourceConfiguration: { // DatabaseSourceConfiguration
* Type: "MySQL" || "PostgreSQL", // required
* Endpoint: "STRING_VALUE", // required
* Port: Number("int"), // required
* SSLMode: "Disabled" || "Enabled",
* Databases: { // DatabaseList
* Include: [ // DatabaseIncludeOrExcludeList
* "STRING_VALUE",
* ],
* Exclude: [
* "STRING_VALUE",
* ],
* },
* Tables: { // DatabaseTableList
* Include: [ // DatabaseTableIncludeOrExcludeList
* "STRING_VALUE",
* ],
* Exclude: [
* "STRING_VALUE",
* ],
* },
* Columns: { // DatabaseColumnList
* Include: [ // DatabaseColumnIncludeOrExcludeList
* "STRING_VALUE",
* ],
* Exclude: [
* "STRING_VALUE",
* ],
* },
* SurrogateKeys: [ // DatabaseSurrogateKeyList
* "STRING_VALUE",
* ],
* SnapshotWatermarkTable: "STRING_VALUE", // required
* DatabaseSourceAuthenticationConfiguration: { // DatabaseSourceAuthenticationConfiguration
* SecretsManagerConfiguration: {
* SecretARN: "STRING_VALUE",
* RoleARN: "STRING_VALUE",
* Enabled: true || false, // required
* },
* },
* DatabaseSourceVPCConfiguration: { // DatabaseSourceVPCConfiguration
* VpcEndpointServiceName: "STRING_VALUE", // required
* },
* },
* };

@@ -625,3 +683,3 @@ * const command = new CreateDeliveryStreamCommand(input);

* <p>Firehose throws this exception when an attempt to put records or to start
* or stop delivery stream encryption fails. This happens when the KMS service throws one of
* or stop Firehose stream encryption fails. This happens when the KMS service throws one of
* the following exception types: <code>AccessDeniedException</code>,

@@ -628,0 +686,0 @@ * <code>InvalidStateException</code>, <code>DisabledException</code>, or
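The JSDoc changes above document the new DatabaseAsSource path end to end. A rough usage sketch, not an official sample: the stream name, database endpoint, secret ARN, and VPC endpoint service name below are hypothetical, and a destination configuration (for example IcebergDestinationConfiguration) must also be supplied, omitted here for brevity:

import { FirehoseClient, CreateDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });

const response = await client.send(
  new CreateDeliveryStreamCommand({
    DeliveryStreamName: "orders-cdc-stream", // hypothetical
    DeliveryStreamType: "DatabaseAsSource", // new in 3.688.0
    DatabaseSourceConfiguration: {
      Type: "PostgreSQL",
      Endpoint: "orders-db.example.internal", // hypothetical
      Port: 5432,
      SSLMode: "Enabled",
      Databases: { Include: ["orders"] },
      Tables: { Include: ["public.order_events"] },
      SnapshotWatermarkTable: "public.firehose_watermark", // hypothetical
      DatabaseSourceAuthenticationConfiguration: {
        SecretsManagerConfiguration: {
          SecretARN: "arn:aws:secretsmanager:us-east-1:123456789012:secret:orders-db", // hypothetical
          Enabled: true,
        },
      },
      DatabaseSourceVPCConfiguration: {
        VpcEndpointServiceName: "com.amazonaws.vpce.us-east-1.vpce-svc-0123456789abcdef0", // hypothetical
      },
    },
    // ...plus a destination configuration (required), omitted for brevity.
  }),
);
console.log(response.DeliveryStreamARN); // stream starts in the CREATING state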

@@ -30,13 +30,13 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Deletes a delivery stream and its data.</p>
* <p>You can delete a delivery stream only if it is in one of the following states:
* <p>Deletes a Firehose stream and its data.</p>
* <p>You can delete a Firehose stream only if it is in one of the following states:
* <code>ACTIVE</code>, <code>DELETING</code>, <code>CREATING_FAILED</code>, or
* <code>DELETING_FAILED</code>. You can't delete a delivery stream that is in the
* <code>CREATING</code> state. To check the state of a delivery stream, use <a>DescribeDeliveryStream</a>. </p>
* <p>DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the
* <code>DELETING</code> state.While the delivery stream is in the <code>DELETING</code> state, the service might
* <code>DELETING_FAILED</code>. You can't delete a Firehose stream that is in the
* <code>CREATING</code> state. To check the state of a Firehose stream, use <a>DescribeDeliveryStream</a>. </p>
* <p>DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the Firehose stream is marked for deletion, and it goes into the
* <code>DELETING</code> state.While the Firehose stream is in the <code>DELETING</code> state, the service might
* continue to accept records, but it doesn't make any guarantees with respect to delivering
* the data. Therefore, as a best practice, first stop any applications that are sending
* records before you delete a delivery stream.</p>
* <p>Removal of a delivery stream that is in the <code>DELETING</code> state is a low priority operation for the service. A stream may remain in the
* records before you delete a Firehose stream.</p>
* <p>Removal of a Firehose stream that is in the <code>DELETING</code> state is a low priority operation for the service. A stream may remain in the
* <code>DELETING</code> state for several minutes. Therefore, as a best practice, applications should not wait for streams in the <code>DELETING</code> state

@@ -43,0 +43,0 @@ * to be removed. </p>
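Since deletion is asynchronous, a caller should stop producers first and not wait on the DELETING state, per the notes above. A minimal sketch (hypothetical stream name):

import { FirehoseClient, DeleteDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });

// Returns as soon as the stream is marked for deletion (DELETING state).
await client.send(
  new DeleteDeliveryStreamCommand({
    DeliveryStreamName: "orders-cdc-stream", // hypothetical
    AllowForceDelete: true, // optional, per the typings in this release
  }),
);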

@@ -30,6 +30,6 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Describes the specified delivery stream and its status. For example, after your
* delivery stream is created, call <code>DescribeDeliveryStream</code> to see whether the
* delivery stream is <code>ACTIVE</code> and therefore ready for data to be sent to it. </p>
* <p>If the status of a delivery stream is <code>CREATING_FAILED</code>, this status
* <p>Describes the specified Firehose stream and its status. For example, after your
* Firehose stream is created, call <code>DescribeDeliveryStream</code> to see whether the
* Firehose stream is <code>ACTIVE</code> and therefore ready for data to be sent to it. </p>
* <p>If the status of a Firehose stream is <code>CREATING_FAILED</code>, this status
* doesn't change, and you can't invoke <a>CreateDeliveryStream</a> again on it.

@@ -57,3 +57,3 @@ * However, you can invoke the <a>DeleteDeliveryStream</a> operation to delete it.

* // FailureDescription: { // FailureDescription
* // Type: "RETIRE_KMS_GRANT_FAILED" || "CREATE_KMS_GRANT_FAILED" || "KMS_ACCESS_DENIED" || "DISABLED_KMS_KEY" || "INVALID_KMS_KEY" || "KMS_KEY_NOT_FOUND" || "KMS_OPT_IN_REQUIRED" || "CREATE_ENI_FAILED" || "DELETE_ENI_FAILED" || "SUBNET_NOT_FOUND" || "SECURITY_GROUP_NOT_FOUND" || "ENI_ACCESS_DENIED" || "SUBNET_ACCESS_DENIED" || "SECURITY_GROUP_ACCESS_DENIED" || "UNKNOWN_ERROR", // required
* // Type: "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND" || "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED" || "RETIRE_KMS_GRANT_FAILED" || "CREATE_KMS_GRANT_FAILED" || "KMS_ACCESS_DENIED" || "DISABLED_KMS_KEY" || "INVALID_KMS_KEY" || "KMS_KEY_NOT_FOUND" || "KMS_OPT_IN_REQUIRED" || "CREATE_ENI_FAILED" || "DELETE_ENI_FAILED" || "SUBNET_NOT_FOUND" || "SECURITY_GROUP_NOT_FOUND" || "ENI_ACCESS_DENIED" || "SUBNET_ACCESS_DENIED" || "SECURITY_GROUP_ACCESS_DENIED" || "UNKNOWN_ERROR", // required
* // Details: "STRING_VALUE", // required

@@ -66,7 +66,7 @@ * // },

* // FailureDescription: {
* // Type: "RETIRE_KMS_GRANT_FAILED" || "CREATE_KMS_GRANT_FAILED" || "KMS_ACCESS_DENIED" || "DISABLED_KMS_KEY" || "INVALID_KMS_KEY" || "KMS_KEY_NOT_FOUND" || "KMS_OPT_IN_REQUIRED" || "CREATE_ENI_FAILED" || "DELETE_ENI_FAILED" || "SUBNET_NOT_FOUND" || "SECURITY_GROUP_NOT_FOUND" || "ENI_ACCESS_DENIED" || "SUBNET_ACCESS_DENIED" || "SECURITY_GROUP_ACCESS_DENIED" || "UNKNOWN_ERROR", // required
* // Type: "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND" || "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED" || "RETIRE_KMS_GRANT_FAILED" || "CREATE_KMS_GRANT_FAILED" || "KMS_ACCESS_DENIED" || "DISABLED_KMS_KEY" || "INVALID_KMS_KEY" || "KMS_KEY_NOT_FOUND" || "KMS_OPT_IN_REQUIRED" || "CREATE_ENI_FAILED" || "DELETE_ENI_FAILED" || "SUBNET_NOT_FOUND" || "SECURITY_GROUP_NOT_FOUND" || "ENI_ACCESS_DENIED" || "SUBNET_ACCESS_DENIED" || "SECURITY_GROUP_ACCESS_DENIED" || "UNKNOWN_ERROR", // required
* // Details: "STRING_VALUE", // required
* // },
* // },
* // DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource", // required
* // DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource" || "DatabaseAsSource", // required
* // VersionId: "STRING_VALUE", // required

@@ -91,2 +91,56 @@ * // CreateTimestamp: new Date("TIMESTAMP"),

* // },
* // DatabaseSourceDescription: { // DatabaseSourceDescription
* // Type: "MySQL" || "PostgreSQL",
* // Endpoint: "STRING_VALUE",
* // Port: Number("int"),
* // SSLMode: "Disabled" || "Enabled",
* // Databases: { // DatabaseList
* // Include: [ // DatabaseIncludeOrExcludeList
* // "STRING_VALUE",
* // ],
* // Exclude: [
* // "STRING_VALUE",
* // ],
* // },
* // Tables: { // DatabaseTableList
* // Include: [ // DatabaseTableIncludeOrExcludeList
* // "STRING_VALUE",
* // ],
* // Exclude: [
* // "STRING_VALUE",
* // ],
* // },
* // Columns: { // DatabaseColumnList
* // Include: [ // DatabaseColumnIncludeOrExcludeList
* // "STRING_VALUE",
* // ],
* // Exclude: [
* // "STRING_VALUE",
* // ],
* // },
* // SurrogateKeys: [
* // "STRING_VALUE",
* // ],
* // SnapshotWatermarkTable: "STRING_VALUE",
* // SnapshotInfo: [ // DatabaseSnapshotInfoList
* // { // DatabaseSnapshotInfo
* // Id: "STRING_VALUE", // required
* // Table: "STRING_VALUE", // required
* // RequestTimestamp: new Date("TIMESTAMP"), // required
* // RequestedBy: "USER" || "FIREHOSE", // required
* // Status: "IN_PROGRESS" || "COMPLETE" || "SUSPENDED", // required
* // FailureDescription: "<FailureDescription>",
* // },
* // ],
* // DatabaseSourceAuthenticationConfiguration: { // DatabaseSourceAuthenticationConfiguration
* // SecretsManagerConfiguration: { // SecretsManagerConfiguration
* // SecretARN: "STRING_VALUE",
* // RoleARN: "STRING_VALUE",
* // Enabled: true || false, // required
* // },
* // },
* // DatabaseSourceVPCConfiguration: { // DatabaseSourceVPCConfiguration
* // VpcEndpointServiceName: "STRING_VALUE", // required
* // },
* // },
* // },

@@ -310,3 +364,3 @@ * // Destinations: [ // DestinationDescriptionList // required

* // CloudWatchLoggingOptions: "<CloudWatchLoggingOptions>",
* // SecretsManagerConfiguration: { // SecretsManagerConfiguration
* // SecretsManagerConfiguration: {
* // SecretARN: "STRING_VALUE",

@@ -551,5 +605,18 @@ * // RoleARN: "STRING_VALUE",

* // ],
* // PartitionSpec: { // PartitionSpec
* // Identity: [ // PartitionFields
* // { // PartitionField
* // SourceName: "STRING_VALUE", // required
* // },
* // ],
* // },
* // S3ErrorOutputPrefix: "STRING_VALUE",
* // },
* // ],
* // SchemaEvolutionConfiguration: { // SchemaEvolutionConfiguration
* // Enabled: true || false, // required
* // },
* // TableCreationConfiguration: { // TableCreationConfiguration
* // Enabled: true || false, // required
* // },
* // BufferingHints: "<BufferingHints>",

@@ -565,2 +632,3 @@ * // CloudWatchLoggingOptions: "<CloudWatchLoggingOptions>",

* // CatalogARN: "STRING_VALUE",
* // WarehouseLocation: "STRING_VALUE",
* // },

@@ -567,0 +635,0 @@ * // S3DestinationDescription: "<S3DestinationDescription>",
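A common pattern implied by the doc comment: poll DescribeDeliveryStream after creation until the stream leaves CREATING. A sketch (hypothetical stream name; recall from the notes above that a CREATING_FAILED stream never recovers and must be deleted):

import { FirehoseClient, DescribeDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });

let status: string | undefined;
do {
  const { DeliveryStreamDescription } = await client.send(
    new DescribeDeliveryStreamCommand({ DeliveryStreamName: "orders-cdc-stream" }),
  );
  status = DeliveryStreamDescription?.DeliveryStreamStatus;
  // Wait between polls while the stream is still being created.
  if (status === "CREATING") await new Promise((r) => setTimeout(r, 5_000));
} while (status === "CREATING");
console.log(`final status: ${status}`); // ACTIVE, or CREATING_FAILED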

@@ -30,10 +30,10 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Lists your delivery streams in alphabetical order of their names.</p>
* <p>The number of delivery streams might be too large to return using a single call to
* <code>ListDeliveryStreams</code>. You can limit the number of delivery streams returned,
* <p>Lists your Firehose streams in alphabetical order of their names.</p>
* <p>The number of Firehose streams might be too large to return using a single call to
* <code>ListDeliveryStreams</code>. You can limit the number of Firehose streams returned,
* using the <code>Limit</code> parameter. To determine whether there are more delivery
* streams to list, check the value of <code>HasMoreDeliveryStreams</code> in the output. If
* there are more delivery streams to list, you can request them by calling this operation
* there are more Firehose streams to list, you can request them by calling this operation
* again and setting the <code>ExclusiveStartDeliveryStreamName</code> parameter to the name
* of the last delivery stream returned in the last call.</p>
* of the last Firehose stream returned in the last call.</p>
* @example

@@ -47,3 +47,3 @@ * Use a bare-bones client and the command you need to make an API call.

* Limit: Number("int"),
* DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource",
* DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource" || "DatabaseAsSource",
* ExclusiveStartDeliveryStreamName: "STRING_VALUE",

@@ -50,0 +50,0 @@ * };
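The pagination contract described above (Limit, HasMoreDeliveryStreams, ExclusiveStartDeliveryStreamName) translates into a simple loop. A sketch:

import { FirehoseClient, ListDeliveryStreamsCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });

const names: string[] = [];
let lastName: string | undefined;
let hasMore = true;
while (hasMore) {
  const page = await client.send(
    new ListDeliveryStreamsCommand({
      Limit: 10,
      ExclusiveStartDeliveryStreamName: lastName, // last stream of the previous page
    }),
  );
  const batch = page.DeliveryStreamNames ?? [];
  names.push(...batch);
  lastName = batch[batch.length - 1];
  hasMore = (page.HasMoreDeliveryStreams ?? false) && batch.length > 0;
}
console.log(names);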

@@ -30,3 +30,3 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Lists the tags for the specified delivery stream. This operation has a limit of five
* <p>Lists the tags for the specified Firehose stream. This operation has a limit of five
* transactions per second per account. </p>

@@ -33,0 +33,0 @@ * @example

@@ -30,7 +30,7 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Writes multiple data records into a delivery stream in a single call, which can
* <p>Writes multiple data records into a Firehose stream in a single call, which can
* achieve higher throughput per producer than when writing single records. To write single
* data records into a delivery stream, use <a>PutRecord</a>. Applications using
* data records into a Firehose stream, use <a>PutRecord</a>. Applications using
* these operations are referred to as producers.</p>
* <p>Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.</p>
* <p>Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a Firehose stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.</p>
* <p>For information about service quota, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Firehose

@@ -41,5 +41,10 @@ * Quota</a>.</p>

* for the entire request. These limits cannot be changed.</p>
* <p>You must specify the name of the delivery stream and the data record when using <a>PutRecord</a>. The data record consists of a data blob that can be up to 1,000
* <p>You must specify the name of the Firehose stream and the data record when using <a>PutRecord</a>. The data record consists of a data blob that can be up to 1,000
* KB in size, and any kind of data. For example, it could be a segment from a log file,
* geographic location data, website clickstream data, and so on.</p>
* <p>For multi record de-aggregation, you can not put more than 500 records even if the
* data blob length is less than 1000 KiB. If you include more than 500 records, the request
* succeeds but the record de-aggregation doesn't work as expected and transformation lambda
* is invoked with the complete base64 encoded data blob instead of de-aggregated base64
* decoded records.</p>
* <p>Firehose buffers records before delivering them to the destination. To

@@ -74,3 +79,3 @@ * disambiguate the data blobs at the destination, a common solution is to use delimiters in

* the API is automatically reinvoked (retried) 3 times. If the exception persists, it is
* possible that the throughput limits have been exceeded for the delivery stream.</p>
* possible that the throughput limits have been exceeded for the Firehose stream.</p>
* <p>Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can

@@ -80,3 +85,3 @@ * result in data duplicates. For larger data assets, allow for a longer time out before

* <p>Data records sent to Firehose are stored for 24 hours from the time they
* are added to a delivery stream as it attempts to send the records to the destination. If
* are added to a Firehose stream as it attempts to send the records to the destination. If
* the destination is unreachable for more than 24 hours, the data is no longer

@@ -129,3 +134,3 @@ * available.</p>

* <p>Firehose throws this exception when an attempt to put records or to start
* or stop delivery stream encryption fails. This happens when the KMS service throws one of
* or stop Firehose stream encryption fails. This happens when the KMS service throws one of
* the following exception types: <code>AccessDeniedException</code>,

@@ -143,3 +148,3 @@ * <code>InvalidStateException</code>, <code>DisabledException</code>, or

* <p>The service is unavailable. Back off and retry the operation. If you continue to see
* the exception, throughput limits for the delivery stream may have been exceeded. For more
* the exception, throughput limits for the Firehose stream may have been exceeded. For more
* information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Firehose

@@ -146,0 +151,0 @@ * Limits</a>.</p>
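A sketch of batch writes with partial-failure handling; the stream name is hypothetical, and FailedPutCount plus per-record RequestResponses are the standard PutRecordBatch output fields. Note the delimiter advice above: Firehose concatenates blobs, so add newlines yourself if the destination needs them:

import { FirehoseClient, PutRecordBatchCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });
const encoder = new TextEncoder();

// Stay under 500 records per call (the limit the notes above reference).
const records = ["a", "b", "c"].map((line) => ({ Data: encoder.encode(line + "\n") }));

const result = await client.send(
  new PutRecordBatchCommand({
    DeliveryStreamName: "orders-cdc-stream", // hypothetical
    Records: records,
  }),
);

// PutRecordBatch can partially fail; collect and re-send failed entries.
if ((result.FailedPutCount ?? 0) > 0) {
  const failed = records.filter((_, i) => result.RequestResponses?.[i]?.ErrorCode);
  console.log(`retry ${failed.length} records`);
}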

@@ -30,15 +30,20 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Writes a single data record into an Amazon Firehose delivery stream. To
* write multiple data records into a delivery stream, use <a>PutRecordBatch</a>.
* <p>Writes a single data record into an Firehose stream. To
* write multiple data records into a Firehose stream, use <a>PutRecordBatch</a>.
* Applications using these operations are referred to as producers.</p>
* <p>By default, each delivery stream can take in up to 2,000 transactions per second,
* <p>By default, each Firehose stream can take in up to 2,000 transactions per second,
* 5,000 records per second, or 5 MB per second. If you use <a>PutRecord</a> and
* <a>PutRecordBatch</a>, the limits are an aggregate across these two
* operations for each delivery stream. For more information about limits and how to request
* operations for each Firehose stream. For more information about limits and how to request
* an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon
* Firehose Limits</a>. </p>
* <p>Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.</p>
* <p>You must specify the name of the delivery stream and the data record when using <a>PutRecord</a>. The data record consists of a data blob that can be up to 1,000
* <p>Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a Firehose stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.</p>
* <p>You must specify the name of the Firehose stream and the data record when using <a>PutRecord</a>. The data record consists of a data blob that can be up to 1,000
* KiB in size, and any kind of data. For example, it can be a segment from a log file,
* geographic location data, website clickstream data, and so on.</p>
* <p>For multi record de-aggregation, you can not put more than 500 records even if the
* data blob length is less than 1000 KiB. If you include more than 500 records, the request
* succeeds but the record de-aggregation doesn't work as expected and transformation lambda
* is invoked with the complete base64 encoded data blob instead of de-aggregated base64
* decoded records.</p>
* <p>Firehose buffers records before delivering them to the destination. To

@@ -55,3 +60,3 @@ * disambiguate the data blobs at the destination, a common solution is to use delimiters in

* times. If the exception persists, it is possible that the throughput limits have been
* exceeded for the delivery stream. </p>
* exceeded for the Firehose stream. </p>
* <p>Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can

@@ -61,3 +66,3 @@ * result in data duplicates. For larger data assets, allow for a longer time out before

* <p>Data records sent to Firehose are stored for 24 hours from the time they
* are added to a delivery stream as it tries to send the records to the destination. If the
* are added to a Firehose stream as it tries to send the records to the destination. If the
* destination is unreachable for more than 24 hours, the data is no longer

@@ -101,3 +106,3 @@ * available.</p>

* <p>Firehose throws this exception when an attempt to put records or to start
* or stop delivery stream encryption fails. This happens when the KMS service throws one of
* or stop Firehose stream encryption fails. This happens when the KMS service throws one of
* the following exception types: <code>AccessDeniedException</code>,

@@ -115,3 +120,3 @@ * <code>InvalidStateException</code>, <code>DisabledException</code>, or

* <p>The service is unavailable. Back off and retry the operation. If you continue to see
* the exception, throughput limits for the delivery stream may have been exceeded. For more
* the exception, throughput limits for the Firehose stream may have been exceeded. For more
* information about limits and how to request an increase, see <a href="https://docs.aws.amazon.com/firehose/latest/dev/limits.html">Amazon Firehose

@@ -118,0 +123,0 @@ * Limits</a>.</p>
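The guidance above (back off and retry on ServiceUnavailableException, at the cost of possible duplicates) might look like this in practice; the stream name is hypothetical:

import {
  FirehoseClient,
  PutRecordCommand,
  ServiceUnavailableException,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });

// Retries with exponential backoff; retried Puts can create duplicates
// downstream, as the doc comment warns.
async function putWithBackoff(data: Uint8Array, attempts = 5): Promise<void> {
  for (let i = 0; i < attempts; i++) {
    try {
      await client.send(
        new PutRecordCommand({
          DeliveryStreamName: "orders-cdc-stream", // hypothetical
          Record: { Data: data },
        }),
      );
      return;
    } catch (err) {
      if (!(err instanceof ServiceUnavailableException) || i === attempts - 1) throw err;
      await new Promise((r) => setTimeout(r, 2 ** i * 200));
    }
  }
}

await putWithBackoff(new TextEncoder().encode("hello\n"));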

@@ -30,15 +30,15 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Enables server-side encryption (SSE) for the delivery stream. </p>
* <p>Enables server-side encryption (SSE) for the Firehose stream. </p>
* <p>This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to <code>ENABLING</code>, and then
* to <code>ENABLED</code>. The encryption status of a delivery stream is the
* to <code>ENABLED</code>. The encryption status of a Firehose stream is the
* <code>Status</code> property in <a>DeliveryStreamEncryptionConfiguration</a>.
* If the operation fails, the encryption status changes to <code>ENABLING_FAILED</code>. You
* can continue to read and write data to your delivery stream while the encryption status is
* can continue to read and write data to your Firehose stream while the encryption status is
* <code>ENABLING</code>, but the data is not encrypted. It can take up to 5 seconds after
* the encryption status changes to <code>ENABLED</code> before all records written to the
* delivery stream are encrypted. To find out whether a record or a batch of records was
* Firehose stream are encrypted. To find out whether a record or a batch of records was
* encrypted, check the response elements <a>PutRecordOutput$Encrypted</a> and
* <a>PutRecordBatchOutput$Encrypted</a>, respectively.</p>
* <p>To check the encryption status of a delivery stream, use <a>DescribeDeliveryStream</a>.</p>
* <p>Even if encryption is currently enabled for a delivery stream, you can still invoke this
* <p>To check the encryption status of a Firehose stream, use <a>DescribeDeliveryStream</a>.</p>
* <p>Even if encryption is currently enabled for a Firehose stream, you can still invoke this
* operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this

@@ -53,17 +53,17 @@ * method to change the CMK, and the old CMK is of type <code>CUSTOMER_MANAGED_CMK</code>,

* not be called with session credentials that are more than 6 hours old.</p>
* <p>If a delivery stream already has encryption enabled and then you invoke this operation
* <p>If a Firehose stream already has encryption enabled and then you invoke this operation
* to change the ARN of the CMK or both its type and ARN and you get
* <code>ENABLING_FAILED</code>, this only means that the attempt to change the CMK failed.
* In this case, encryption remains enabled with the old CMK.</p>
* <p>If the encryption status of your delivery stream is <code>ENABLING_FAILED</code>, you
* <p>If the encryption status of your Firehose stream is <code>ENABLING_FAILED</code>, you
* can invoke this operation again with a valid CMK. The CMK must be enabled and the key
* policy mustn't explicitly deny the permission for Firehose to invoke KMS
* encrypt and decrypt operations.</p>
* <p>You can enable SSE for a delivery stream only if it's a delivery stream that uses
* <p>You can enable SSE for a Firehose stream only if it's a Firehose stream that uses
* <code>DirectPut</code> as its source. </p>
* <p>The <code>StartDeliveryStreamEncryption</code> and
* <code>StopDeliveryStreamEncryption</code> operations have a combined limit of 25 calls
* per delivery stream per 24 hours. For example, you reach the limit if you call
* per Firehose stream per 24 hours. For example, you reach the limit if you call
* <code>StartDeliveryStreamEncryption</code> 13 times and
* <code>StopDeliveryStreamEncryption</code> 12 times for the same delivery stream in a
* <code>StopDeliveryStreamEncryption</code> 12 times for the same Firehose stream in a
* 24-hour period.</p>

@@ -100,3 +100,3 @@ * @example

* <p>Firehose throws this exception when an attempt to put records or to start
* or stop delivery stream encryption fails. This happens when the KMS service throws one of
* or stop Firehose stream encryption fails. This happens when the KMS service throws one of
* the following exception types: <code>AccessDeniedException</code>,

@@ -103,0 +103,0 @@ * <code>InvalidStateException</code>, <code>DisabledException</code>, or
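A sketch of enabling SSE with a customer managed CMK (hypothetical stream name and key ARN; remember the constraints above: DirectPut sources only, and 25 combined Start/Stop calls per stream per 24 hours):

import { FirehoseClient, StartDeliveryStreamEncryptionCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });

// Asynchronous: status goes ENABLING, then ENABLED (or ENABLING_FAILED).
await client.send(
  new StartDeliveryStreamEncryptionCommand({
    DeliveryStreamName: "clickstream-direct-put", // hypothetical
    DeliveryStreamEncryptionConfigurationInput: {
      KeyType: "CUSTOMER_MANAGED_CMK",
      KeyARN: "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555", // hypothetical
    },
  }),
);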

@@ -30,11 +30,11 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Disables server-side encryption (SSE) for the delivery stream. </p>
* <p>Disables server-side encryption (SSE) for the Firehose stream. </p>
* <p>This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to <code>DISABLING</code>, and then
* to <code>DISABLED</code>. You can continue to read and write data to your stream while its
* status is <code>DISABLING</code>. It can take up to 5 seconds after the encryption status
* changes to <code>DISABLED</code> before all records written to the delivery stream are no
* changes to <code>DISABLED</code> before all records written to the Firehose stream are no
* longer subject to encryption. To find out whether a record or a batch of records was
* encrypted, check the response elements <a>PutRecordOutput$Encrypted</a> and
* <a>PutRecordBatchOutput$Encrypted</a>, respectively.</p>
* <p>To check the encryption state of a delivery stream, use <a>DescribeDeliveryStream</a>. </p>
* <p>To check the encryption state of a Firehose stream, use <a>DescribeDeliveryStream</a>. </p>
* <p>If SSE is enabled using a customer managed CMK and then you invoke

@@ -46,5 +46,5 @@ * <code>StopDeliveryStreamEncryption</code>, Firehose schedules the related

* <code>StopDeliveryStreamEncryption</code> operations have a combined limit of 25 calls
* per delivery stream per 24 hours. For example, you reach the limit if you call
* per Firehose stream per 24 hours. For example, you reach the limit if you call
* <code>StartDeliveryStreamEncryption</code> 13 times and
* <code>StopDeliveryStreamEncryption</code> 12 times for the same delivery stream in a
* <code>StopDeliveryStreamEncryption</code> 12 times for the same Firehose stream in a
* 24-hour period.</p>

@@ -51,0 +51,0 @@ * @example
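The disable path mirrors it, and the encryption status can then be read back through DescribeDeliveryStream, as the doc comment suggests (hypothetical stream name):

import {
  FirehoseClient,
  StopDeliveryStreamEncryptionCommand,
  DescribeDeliveryStreamCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });
const name = "clickstream-direct-put"; // hypothetical

await client.send(new StopDeliveryStreamEncryptionCommand({ DeliveryStreamName: name }));

// Status moves DISABLING -> DISABLED.
const { DeliveryStreamDescription } = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: name }),
);
console.log(DeliveryStreamDescription?.DeliveryStreamEncryptionConfiguration?.Status);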

@@ -30,11 +30,11 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Adds or updates tags for the specified delivery stream. A tag is a key-value pair
* <p>Adds or updates tags for the specified Firehose stream. A tag is a key-value pair
* that you can define and assign to Amazon Web Services resources. If you specify a tag that
* already exists, the tag value is replaced with the value that you specify in the request.
* Tags are metadata. For example, you can add friendly names and descriptions or other types
* of information that can help you distinguish the delivery stream. For more information
* of information that can help you distinguish the Firehose stream. For more information
* about tags, see <a href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html">Using Cost Allocation
* Tags</a> in the <i>Amazon Web Services Billing and Cost Management User
* Guide</i>. </p>
* <p>Each delivery stream can have up to 50 tags. </p>
* <p>Each Firehose stream can have up to 50 tags. </p>
* <p>This operation has a limit of five transactions per second per account. </p>

@@ -41,0 +41,0 @@ * @example

@@ -30,3 +30,3 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Removes tags from the specified delivery stream. Removed tags are deleted, and you
* <p>Removes tags from the specified Firehose stream. Removed tags are deleted, and you
* can't recover them after this operation successfully completes.</p>

@@ -33,0 +33,0 @@ * <p>If you specify a tag that doesn't exist, the operation ignores it.</p>
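A sketch covering the tagging trio documented above (TagDeliveryStream, ListTagsForDeliveryStream, UntagDeliveryStream); the stream name and tag values are hypothetical:

import {
  FirehoseClient,
  TagDeliveryStreamCommand,
  ListTagsForDeliveryStreamCommand,
  UntagDeliveryStreamCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });
const name = "orders-cdc-stream"; // hypothetical

// Add or overwrite tags (up to 50 per stream, 5 TPS per account).
await client.send(
  new TagDeliveryStreamCommand({
    DeliveryStreamName: name,
    Tags: [{ Key: "team", Value: "data-platform" }],
  }),
);

// List, then remove by key; removed tags cannot be recovered.
const { Tags } = await client.send(
  new ListTagsForDeliveryStreamCommand({ DeliveryStreamName: name }),
);
console.log(Tags);
await client.send(new UntagDeliveryStreamCommand({ DeliveryStreamName: name, TagKeys: ["team"] }));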

@@ -30,8 +30,8 @@ import { Command as $Command } from "@smithy/smithy-client";

/**
* <p>Updates the specified destination of the specified delivery stream.</p>
* <p>Updates the specified destination of the specified Firehose stream.</p>
* <p>Use this operation to change the destination type (for example, to replace the Amazon
* S3 destination with Amazon Redshift) or change the parameters associated with a destination
* (for example, to change the bucket name of the Amazon S3 destination). The update might not
* occur immediately. The target delivery stream remains active while the configurations are
* updated, so data writes to the delivery stream can continue during this process. The
* occur immediately. The target Firehose stream remains active while the configurations are
* updated, so data writes to the Firehose stream can continue during this process. The
* updated configurations are usually effective within a few minutes.</p>

@@ -486,5 +486,18 @@ * <p>Switching between Amazon OpenSearch Service and other services is not supported. For

* ],
* PartitionSpec: { // PartitionSpec
* Identity: [ // PartitionFields
* { // PartitionField
* SourceName: "STRING_VALUE", // required
* },
* ],
* },
* S3ErrorOutputPrefix: "STRING_VALUE",
* },
* ],
* SchemaEvolutionConfiguration: { // SchemaEvolutionConfiguration
* Enabled: true || false, // required
* },
* TableCreationConfiguration: { // TableCreationConfiguration
* Enabled: true || false, // required
* },
* BufferingHints: "<BufferingHints>",

@@ -500,2 +513,3 @@ * CloudWatchLoggingOptions: "<CloudWatchLoggingOptions>",

* CatalogARN: "STRING_VALUE",
* WarehouseLocation: "STRING_VALUE",
* },

@@ -502,0 +516,0 @@ * S3Configuration: { // S3DestinationConfiguration
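UpdateDestination is optimistic-locked: callers pass the VersionId and DestinationId reported by DescribeDeliveryStream. A sketch (hypothetical stream name; ExtendedS3DestinationUpdate stands in for whichever destination update applies):

import {
  FirehoseClient,
  DescribeDeliveryStreamCommand,
  UpdateDestinationCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });
const name = "orders-cdc-stream"; // hypothetical

const { DeliveryStreamDescription: desc } = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: name }),
);
if (!desc?.VersionId || !desc.Destinations?.[0]?.DestinationId) {
  throw new Error("stream description incomplete");
}

await client.send(
  new UpdateDestinationCommand({
    DeliveryStreamName: name,
    CurrentDeliveryStreamVersionId: desc.VersionId, // optimistic-lock token
    DestinationId: desc.Destinations[0].DestinationId,
    ExtendedS3DestinationUpdate: { Prefix: "v2/" }, // example change
  }),
);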

@@ -245,2 +245,3 @@ import { ExceptionOptionType as __ExceptionOptionType } from "@smithy/smithy-client";

CatalogARN?: string;
WarehouseLocation?: string;
}

@@ -268,2 +269,50 @@ export declare class ConcurrentModificationException extends __BaseException {

}
export interface DatabaseColumnList {
Include?: string[];
Exclude?: string[];
}
export interface DatabaseList {
Include?: string[];
Exclude?: string[];
}
export interface SecretsManagerConfiguration {
SecretARN?: string;
RoleARN?: string;
Enabled: boolean | undefined;
}
export interface DatabaseSourceAuthenticationConfiguration {
SecretsManagerConfiguration: SecretsManagerConfiguration | undefined;
}
export interface DatabaseSourceVPCConfiguration {
VpcEndpointServiceName: string | undefined;
}
export declare const SSLMode: {
readonly Disabled: "Disabled";
readonly Enabled: "Enabled";
};
export type SSLMode = (typeof SSLMode)[keyof typeof SSLMode];
export interface DatabaseTableList {
Include?: string[];
Exclude?: string[];
}
export declare const DatabaseType: {
readonly MySQL: "MySQL";
readonly PostgreSQL: "PostgreSQL";
};
export type DatabaseType = (typeof DatabaseType)[keyof typeof DatabaseType];
export interface DatabaseSourceConfiguration {
Type: DatabaseType | undefined;
Endpoint: string | undefined;
Port: number | undefined;
SSLMode?: SSLMode;
Databases: DatabaseList | undefined;
Tables: DatabaseTableList | undefined;
Columns?: DatabaseColumnList;
SurrogateKeys?: string[];
SnapshotWatermarkTable: string | undefined;
DatabaseSourceAuthenticationConfiguration:
| DatabaseSourceAuthenticationConfiguration
| undefined;
DatabaseSourceVPCConfiguration: DatabaseSourceVPCConfiguration | undefined;
}
export declare const KeyType: {

@@ -279,2 +328,3 @@ readonly AWS_OWNED_CMK: "AWS_OWNED_CMK";

export declare const DeliveryStreamType: {
readonly DatabaseAsSource: "DatabaseAsSource";
readonly DirectPut: "DirectPut";

@@ -461,7 +511,2 @@ readonly KinesisStreamAsSource: "KinesisStreamAsSource";

(typeof HttpEndpointS3BackupMode)[keyof typeof HttpEndpointS3BackupMode];
export interface SecretsManagerConfiguration {
SecretARN?: string;
RoleARN?: string;
Enabled: boolean | undefined;
}
export interface HttpEndpointDestinationConfiguration {

@@ -479,2 +524,8 @@ EndpointConfiguration: HttpEndpointConfiguration | undefined;

}
export interface PartitionField {
SourceName: string | undefined;
}
export interface PartitionSpec {
Identity?: PartitionField[];
}
export interface DestinationTableConfiguration {

@@ -484,2 +535,3 @@ DestinationTableName: string | undefined;

UniqueKeys?: string[];
PartitionSpec?: PartitionSpec;
S3ErrorOutputPrefix?: string;

@@ -493,4 +545,12 @@ }

(typeof IcebergS3BackupMode)[keyof typeof IcebergS3BackupMode];
export interface SchemaEvolutionConfiguration {
Enabled: boolean | undefined;
}
export interface TableCreationConfiguration {
Enabled: boolean | undefined;
}
export interface IcebergDestinationConfiguration {
DestinationTableConfigurationList?: DestinationTableConfiguration[];
SchemaEvolutionConfiguration?: SchemaEvolutionConfiguration;
TableCreationConfiguration?: TableCreationConfiguration;
BufferingHints?: BufferingHints;

@@ -640,2 +700,3 @@ CloudWatchLoggingOptions?: CloudWatchLoggingOptions;

IcebergDestinationConfiguration?: IcebergDestinationConfiguration;
DatabaseSourceConfiguration?: DatabaseSourceConfiguration;
}

@@ -674,14 +735,2 @@ export interface CreateDeliveryStreamOutput {

}
export interface DeleteDeliveryStreamInput {
DeliveryStreamName: string | undefined;
AllowForceDelete?: boolean;
}
export interface DeleteDeliveryStreamOutput {}
export declare class ResourceNotFoundException extends __BaseException {
readonly name: "ResourceNotFoundException";
readonly $fault: "client";
constructor(
opts: __ExceptionOptionType<ResourceNotFoundException, __BaseException>
);
}
export declare const DeliveryStreamFailureType: {

@@ -703,2 +752,4 @@ readonly CREATE_ENI_FAILED: "CREATE_ENI_FAILED";

readonly UNKNOWN_ERROR: "UNKNOWN_ERROR";
readonly VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND: "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND";
readonly VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED: "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED";
};

@@ -711,2 +762,49 @@ export type DeliveryStreamFailureType =

}
export declare const SnapshotRequestedBy: {
readonly FIREHOSE: "FIREHOSE";
readonly USER: "USER";
};
export type SnapshotRequestedBy =
(typeof SnapshotRequestedBy)[keyof typeof SnapshotRequestedBy];
export declare const SnapshotStatus: {
readonly COMPLETE: "COMPLETE";
readonly IN_PROGRESS: "IN_PROGRESS";
readonly SUSPENDED: "SUSPENDED";
};
export type SnapshotStatus =
(typeof SnapshotStatus)[keyof typeof SnapshotStatus];
export interface DatabaseSnapshotInfo {
Id: string | undefined;
Table: string | undefined;
RequestTimestamp: Date | undefined;
RequestedBy: SnapshotRequestedBy | undefined;
Status: SnapshotStatus | undefined;
FailureDescription?: FailureDescription;
}
export interface DatabaseSourceDescription {
Type?: DatabaseType;
Endpoint?: string;
Port?: number;
SSLMode?: SSLMode;
Databases?: DatabaseList;
Tables?: DatabaseTableList;
Columns?: DatabaseColumnList;
SurrogateKeys?: string[];
SnapshotWatermarkTable?: string;
SnapshotInfo?: DatabaseSnapshotInfo[];
DatabaseSourceAuthenticationConfiguration?: DatabaseSourceAuthenticationConfiguration;
DatabaseSourceVPCConfiguration?: DatabaseSourceVPCConfiguration;
}
export interface DeleteDeliveryStreamInput {
DeliveryStreamName: string | undefined;
AllowForceDelete?: boolean;
}
export interface DeleteDeliveryStreamOutput {}
export declare class ResourceNotFoundException extends __BaseException {
readonly name: "ResourceNotFoundException";
readonly $fault: "client";
constructor(
opts: __ExceptionOptionType<ResourceNotFoundException, __BaseException>
);
}
export declare const DeliveryStreamEncryptionStatus: {

@@ -788,2 +886,4 @@ readonly DISABLED: "DISABLED";

DestinationTableConfigurationList?: DestinationTableConfiguration[];
SchemaEvolutionConfiguration?: SchemaEvolutionConfiguration;
TableCreationConfiguration?: TableCreationConfiguration;
BufferingHints?: BufferingHints;

@@ -872,2 +972,3 @@ CloudWatchLoggingOptions?: CloudWatchLoggingOptions;

MSKSourceDescription?: MSKSourceDescription;
DatabaseSourceDescription?: DatabaseSourceDescription;
}

@@ -1018,2 +1119,4 @@ export interface DeliveryStreamDescription {

DestinationTableConfigurationList?: DestinationTableConfiguration[];
SchemaEvolutionConfiguration?: SchemaEvolutionConfiguration;
TableCreationConfiguration?: TableCreationConfiguration;
BufferingHints?: BufferingHints;

@@ -1020,0 +1123,0 @@ CloudWatchLoggingOptions?: CloudWatchLoggingOptions;

{
"name": "@aws-sdk/client-firehose",
"description": "AWS SDK for JavaScript Firehose Client for Node.js, Browser and React Native",
"version": "3.687.0",
"version": "3.688.0",
"scripts": {

@@ -6,0 +6,0 @@ "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'",

(The diffs of two further files are too large to display.)
