Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign inDemoInstall
Socket

@google-cloud/firestore

Package Overview
Dependencies
Maintainers
1
Versions
146
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@google-cloud/firestore - npm Package Compare versions

Comparing version 4.9.9 to 4.10.0-beta.0

build/src/backoff.js.map

30

build/src/bulk-writer.d.ts

@@ -158,2 +158,24 @@ /*!

/**
* The number of pending operations enqueued on this BulkWriter instance.
* An operation is considered pending if BulkWriter has sent it via RPC and
* is awaiting the result.
* @private
*/
private _pendingOpsCount;
/**
* An array containing buffered BulkWriter operations after the maximum number
* of pending operations has been enqueued.
* @private
*/
private _bufferedOperations;
_getBufferedOperationsCount(): number;
/**
* The maximum number of pending operations that can be enqueued onto this
* BulkWriter instance. Once this number of writes has been enqueued,
* subsequent writes are buffered.
* @private
*/
private _maxPendingOpCount;
_setMaxPendingOpCount(newMax: number): void;
/**
* The user-provided callback to be run every time a BulkWriter operation

@@ -376,3 +398,3 @@ * successfully completes.

*/
private _verifyNotClosed;
_verifyNotClosed(): void;
/**

@@ -403,2 +425,8 @@ * Sends the current batch and resets `this._bulkCommitBatch`.

/**
* Manages the pending operation counter and schedules the next BulkWriter
* operation if we're under the maximum limit.
* @private
*/
private _processBufferedOps;
/**
* Schedules the provided operations on current BulkCommitBatch.

@@ -405,0 +433,0 @@ * Sends the BulkCommitBatch if it reaches maximum capacity.

@@ -44,2 +44,9 @@ "use strict";

const RATE_LIMITER_MULTIPLIER_MILLIS = 5 * 60 * 1000;
/*!
* The default maximum number of pending operations that can be enqueued onto a
* BulkWriter instance. An operation is considered pending if BulkWriter has
* sent it via RPC and is awaiting the result. BulkWriter buffers additional
* writes after this many pending operations in order to avoid going OOM.
*/
const DEFAULT_MAXIMUM_PENDING_OPERATIONS_COUNT = 500;
/**

@@ -243,2 +250,22 @@ * Represents a single write for BulkWriter, encapsulating operation dispatch

/**
* The number of pending operations enqueued on this BulkWriter instance.
* An operation is considered pending if BulkWriter has sent it via RPC and
* is awaiting the result.
* @private
*/
this._pendingOpsCount = 0;
/**
* An array containing buffered BulkWriter operations after the maximum number
* of pending operations has been enqueued.
* @private
*/
this._bufferedOperations = [];
/**
* The maximum number of pending operations that can be enqueued onto this
* BulkWriter instance. Once this number of writes has been enqueued,
* subsequent writes are buffered.
* @private
*/
this._maxPendingOpCount = DEFAULT_MAXIMUM_PENDING_OPERATIONS_COUNT;
/**
* The user-provided callback to be run every time a BulkWriter operation

@@ -292,2 +319,10 @@ * successfully completes.

}
// Visible for testing.
_getBufferedOperationsCount() {
return this._bufferedOperations.length;
}
// Visible for testing.
_setMaxPendingOpCount(newMax) {
this._maxPendingOpCount = newMax;
}
/**

@@ -630,6 +665,46 @@ * Create a document with the provided data. This single operation will fail

const bulkWriterOp = new BulkWriterOperation(ref, type, this._sendFn.bind(this, enqueueOnBatchCallback), this._errorFn.bind(this), this._successFn.bind(this));
this._sendFn(enqueueOnBatchCallback, bulkWriterOp);
return bulkWriterOp.promise;
// Advance the `_lastOp` pointer. This ensures that `_lastOp` only resolves
// when both the previous and the current write resolves.
this._lastOp = this._lastOp.then(() => util_1.silencePromise(bulkWriterOp.promise));
// Schedule the operation if the BulkWriter has fewer than the maximum
// number of allowed pending operations, or add the operation to the
// buffer.
if (this._pendingOpsCount < this._maxPendingOpCount) {
this._pendingOpsCount++;
this._sendFn(enqueueOnBatchCallback, bulkWriterOp);
}
else {
this._bufferedOperations.push(() => {
this._pendingOpsCount++;
this._sendFn(enqueueOnBatchCallback, bulkWriterOp);
});
}
// Chain the BulkWriter operation promise with the buffer processing logic
// in order to ensure that it runs and that subsequent operations are
// enqueued before the next batch is scheduled in `_sendBatch()`.
return bulkWriterOp.promise
.then(res => {
this._pendingOpsCount--;
this._processBufferedOps();
return res;
})
.catch(err => {
this._pendingOpsCount--;
this._processBufferedOps();
throw err;
});
}
/**
* Manages the pending operation counter and schedules the next BulkWriter
* operation if we're under the maximum limit.
* @private
*/
_processBufferedOps() {
if (this._pendingOpsCount < this._maxPendingOpCount &&
this._bufferedOperations.length > 0) {
const nextOp = this._bufferedOperations.shift();
nextOp();
}
}
/**
* Schedules the provided operations on current BulkCommitBatch.

@@ -646,8 +721,4 @@ * Sends the BulkCommitBatch if it reaches maximum capacity.

}
// Run the operation on the current batch and advance the `_lastOp` pointer.
// This ensures that `_lastOp` only resolves when both the previous and the
// current write resolves.
enqueueOnBatchCallback(this._bulkCommitBatch);
this._bulkCommitBatch.processLastOperation(op);
this._lastOp = this._lastOp.then(() => util_1.silencePromise(op.promise));
if (this._bulkCommitBatch._opCount === this._maxBatchSize) {

@@ -654,0 +725,0 @@ this._scheduleCurrentBatch();

@@ -45,2 +45,6 @@ /*!

export { Status as GrpcStatus } from 'google-gax';
/*!
* The maximum number of times to retry idempotent requests.
*/
export declare const MAX_REQUEST_RETRIES = 5;
/**

@@ -263,2 +267,15 @@ * Document data (e.g. for use with

/**
* A lazy-loaded BulkWriter instance to be used with recursiveDelete() if no
* BulkWriter instance is provided.
*
* @private
*/
private _bulkWriter;
/**
* Lazy-load the Firestore's default BulkWriter.
*
* @private
*/
private getBulkWriter;
/**
* Number of pending operations on the client.

@@ -610,8 +627,37 @@ *

/**
* Retrieves all descendant documents nested under the provided reference.
* Recursively deletes all documents and subcollections at and under the
* specified level.
*
* @private
* @return {Stream<QueryDocumentSnapshot>} Stream of descendant documents.
* If any delete fails, the promise is rejected with an error message
* containing the number of failed deletes and the stack trace of the last
* failed delete. The provided reference is deleted regardless of whether
* all deletes succeeded.
*
* `recursiveDelete()` uses a BulkWriter instance with default settings to
* perform the deletes. To customize throttling rates or add success/error
* callbacks, pass in a custom BulkWriter instance.
*
* @param ref The reference of a document or collection to delete.
* @param bulkWriter A custom BulkWriter instance used to perform the
* deletes.
* @return A promise that resolves when all deletes have been performed.
* The promise is rejected if any of the deletes fail.
*
* @example
* // Recursively delete a reference and log the references of failures.
* const bulkWriter = firestore.bulkWriter();
* bulkWriter
* .onWriteError((error) => {
* if (
* error.failedAttempts < MAX_RETRY_ATTEMPTS
* ) {
* return true;
* } else {
* console.log('Failed write at document: ', error.documentRef.path);
* return false;
* }
* });
* await firestore.recursiveDelete(docRef, bulkWriter);
*/
_getAllDescendants(ref: CollectionReference | DocumentReference): NodeJS.ReadableStream;
recursiveDelete(ref: firestore.CollectionReference<unknown> | firestore.DocumentReference<unknown>, bulkWriter?: BulkWriter): Promise<void>;
/**

@@ -618,0 +664,0 @@ * Terminates the Firestore client and closes all open streams.

95

build/src/index.js

@@ -41,2 +41,3 @@ "use strict";

exports.CollectionGroup = collection_group_1.CollectionGroup;
const recursive_delete_1 = require("./recursive-delete");
var reference_3 = require("./reference");

@@ -112,3 +113,3 @@ exports.CollectionReference = reference_3.CollectionReference;

*/
const MAX_REQUEST_RETRIES = 5;
exports.MAX_REQUEST_RETRIES = 5;
/*!

@@ -126,13 +127,2 @@ * The default number of idle GRPC channel to keep.

/**
* Datastore allowed numeric IDs where Firestore only allows strings. Numeric
* IDs are exposed to Firestore as __idNUM__, so this is the lowest possible
* negative numeric value expressed in that format.
*
* This constant is used to specify startAt/endAt values when querying for all
* descendants in a single collection.
*
* @private
*/
const REFERENCE_NAME_MIN_ID = '__id-9223372036854775808__';
/**
* Document data (e.g. for use with

@@ -424,2 +414,13 @@ * [set()]{@link DocumentReference#set}) consisting of fields mapped

/**
* Lazy-load the Firestore's default BulkWriter.
*
* @private
*/
getBulkWriter() {
if (!this._bulkWriter) {
this._bulkWriter = this.bulkWriter();
}
return this._bulkWriter;
}
/**
* Specifies custom settings to be used to configure the `Firestore`

@@ -938,36 +939,40 @@ * instance. Can only be invoked once and before any other Firestore method.

/**
* Retrieves all descendant documents nested under the provided reference.
* Recursively deletes all documents and subcollections at and under the
* specified level.
*
* @private
* @return {Stream<QueryDocumentSnapshot>} Stream of descendant documents.
* If any delete fails, the promise is rejected with an error message
* containing the number of failed deletes and the stack trace of the last
* failed delete. The provided reference is deleted regardless of whether
* all deletes succeeded.
*
* `recursiveDelete()` uses a BulkWriter instance with default settings to
* perform the deletes. To customize throttling rates or add success/error
* callbacks, pass in a custom BulkWriter instance.
*
* @param ref The reference of a document or collection to delete.
* @param bulkWriter A custom BulkWriter instance used to perform the
* deletes.
* @return A promise that resolves when all deletes have been performed.
* The promise is rejected if any of the deletes fail.
*
* @example
* // Recursively delete a reference and log the references of failures.
* const bulkWriter = firestore.bulkWriter();
* bulkWriter
* .onWriteError((error) => {
* if (
* error.failedAttempts < MAX_RETRY_ATTEMPTS
* ) {
* return true;
* } else {
* console.log('Failed write at document: ', error.documentRef.path);
* return false;
* }
* });
* await firestore.recursiveDelete(docRef, bulkWriter);
*/
// TODO(chenbrian): Make this a private method after adding recursive delete.
_getAllDescendants(ref) {
// The parent is the closest ancestor document to the location we're
// deleting. If we are deleting a document, the parent is the path of that
// document. If we are deleting a collection, the parent is the path of the
// document containing that collection (or the database root, if it is a
// root collection).
let parentPath = ref._resourcePath;
if (ref instanceof reference_1.CollectionReference) {
parentPath = parentPath.popLast();
}
const collectionId = ref instanceof reference_1.CollectionReference ? ref.id : ref.parent.id;
let query = new reference_1.Query(this, reference_1.QueryOptions.forKindlessAllDescendants(parentPath, collectionId));
// Query for names only to fetch empty snapshots.
query = query.select(path_1.FieldPath.documentId());
if (ref instanceof reference_1.CollectionReference) {
// To find all descendants of a collection reference, we need to use a
// composite filter that captures all documents that start with the
// collection prefix. The MIN_KEY constant represents the minimum key in
// this collection, and a null byte + the MIN_KEY represents the minimum
// key is the next possible collection.
const nullChar = String.fromCharCode(0);
const startAt = collectionId + '/' + REFERENCE_NAME_MIN_ID;
const endAt = collectionId + nullChar + '/' + REFERENCE_NAME_MIN_ID;
query = query
.where(path_1.FieldPath.documentId(), '>=', startAt)
.where(path_1.FieldPath.documentId(), '<', endAt);
}
return query.stream();
recursiveDelete(ref, bulkWriter) {
const writer = bulkWriter !== null && bulkWriter !== void 0 ? bulkWriter : this.getBulkWriter();
const deleter = new recursive_delete_1.RecursiveDelete(this, writer, ref);
return deleter.run();
}

@@ -1069,3 +1074,3 @@ /**

let lastError = undefined;
for (let attempt = 0; attempt < MAX_REQUEST_RETRIES; ++attempt) {
for (let attempt = 0; attempt < exports.MAX_REQUEST_RETRIES; ++attempt) {
if (lastError) {

@@ -1072,0 +1077,0 @@ logger_1.logger('Firestore._retry', requestTag, 'Retrying request that failed with error:', lastError);

@@ -607,3 +607,4 @@ /*!

readonly kindless: boolean;
constructor(parentPath: ResourcePath, collectionId: string, converter: firestore.FirestoreDataConverter<T>, allDescendants: boolean, fieldFilters: FieldFilter[], fieldOrders: FieldOrder[], startAt?: QueryCursor | undefined, endAt?: QueryCursor | undefined, limit?: number | undefined, limitType?: LimitType | undefined, offset?: number | undefined, projection?: api.StructuredQuery.IProjection | undefined, kindless?: boolean);
readonly requireConsistency: boolean;
constructor(parentPath: ResourcePath, collectionId: string, converter: firestore.FirestoreDataConverter<T>, allDescendants: boolean, fieldFilters: FieldFilter[], fieldOrders: FieldOrder[], startAt?: QueryCursor | undefined, endAt?: QueryCursor | undefined, limit?: number | undefined, limitType?: LimitType | undefined, offset?: number | undefined, projection?: api.StructuredQuery.IProjection | undefined, kindless?: boolean, requireConsistency?: boolean);
/**

@@ -625,3 +626,3 @@ * Returns query options for a collection group query.

*/
static forKindlessAllDescendants<T = firestore.DocumentData>(parent: ResourcePath, id: string): QueryOptions<T>;
static forKindlessAllDescendants<T = firestore.DocumentData>(parent: ResourcePath, id: string, requireConsistency?: boolean): QueryOptions<T>;
/**

@@ -628,0 +629,0 @@ * Returns the union of the current and the provided options.

@@ -125,3 +125,3 @@ "use strict";

const seconds = Math.floor(milliseconds / 1000);
const nanos = Math.floor(milliseconds * MS_TO_NANOS - seconds * 1000 * MS_TO_NANOS);
const nanos = Math.floor((milliseconds - seconds * 1000) * MS_TO_NANOS);
return new Timestamp(seconds, nanos);

@@ -128,0 +128,0 @@ }

@@ -10,11 +10,14 @@ {

],
"deadline_exceeded_internal_unavailable": [
"deadline_exceeded_resource_exhausted_internal_unavailable": [
"DEADLINE_EXCEEDED",
"RESOURCE_EXHAUSTED",
"INTERNAL",
"UNAVAILABLE"
],
"unavailable": [
"resource_exhausted_unavailable": [
"RESOURCE_EXHAUSTED",
"UNAVAILABLE"
],
"aborted_unavailable": [
"resource_exhausted_aborted_unavailable": [
"RESOURCE_EXHAUSTED",
"ABORTED",

@@ -38,3 +41,3 @@ "UNAVAILABLE"

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -44,3 +47,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -50,3 +53,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "unavailable",
"retry_codes_name": "resource_exhausted_unavailable",
"retry_params_name": "default"

@@ -56,3 +59,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -62,3 +65,3 @@ },

"timeout_millis": 300000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -68,3 +71,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -74,3 +77,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "unavailable",
"retry_codes_name": "resource_exhausted_unavailable",
"retry_params_name": "default"

@@ -80,3 +83,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -86,3 +89,3 @@ },

"timeout_millis": 300000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -92,3 +95,3 @@ },

"timeout_millis": 300000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -103,3 +106,3 @@ },

"timeout_millis": 86400000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -109,3 +112,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_internal_unavailable",
"retry_codes_name": "deadline_exceeded_resource_exhausted_internal_unavailable",
"retry_params_name": "default"

@@ -115,3 +118,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "aborted_unavailable",
"retry_codes_name": "resource_exhausted_aborted_unavailable",
"retry_params_name": "default"

@@ -121,3 +124,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "unavailable",
"retry_codes_name": "resource_exhausted_unavailable",
"retry_params_name": "default"

@@ -124,0 +127,0 @@ }

@@ -9,7 +9,2 @@ {

"UNAVAILABLE"
],
"deadline_exceeded_resource_exhausted_unavailable": [
"DEADLINE_EXCEEDED",
"RESOURCE_EXHAUSTED",
"UNAVAILABLE"
]

@@ -31,3 +26,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_resource_exhausted_unavailable",
"retry_codes_name": "idempotent",
"retry_params_name": "default"

@@ -37,10 +32,5 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_resource_exhausted_unavailable",
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"CreateDocument": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"UpdateDocument": {

@@ -53,3 +43,3 @@ "timeout_millis": 60000,

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_resource_exhausted_unavailable",
"retry_codes_name": "idempotent",
"retry_params_name": "default"

@@ -59,3 +49,3 @@ },

"timeout_millis": 300000,
"retry_codes_name": "deadline_exceeded_resource_exhausted_unavailable",
"retry_codes_name": "idempotent",
"retry_params_name": "default"

@@ -65,3 +55,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_resource_exhausted_unavailable",
"retry_codes_name": "idempotent",
"retry_params_name": "default"

@@ -76,3 +66,3 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_resource_exhausted_unavailable",
"retry_codes_name": "idempotent",
"retry_params_name": "default"

@@ -82,5 +72,9 @@ },

"timeout_millis": 300000,
"retry_codes_name": "deadline_exceeded_resource_exhausted_unavailable",
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"PartitionQuery": {
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"Write": {

@@ -93,3 +87,3 @@ "timeout_millis": 86400000,

"timeout_millis": 86400000,
"retry_codes_name": "deadline_exceeded_resource_exhausted_unavailable",
"retry_codes_name": "idempotent",
"retry_params_name": "default"

@@ -99,4 +93,13 @@ },

"timeout_millis": 60000,
"retry_codes_name": "deadline_exceeded_resource_exhausted_unavailable",
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"BatchWrite": {
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
},
"CreateDocument": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}

@@ -103,0 +106,0 @@ }

@@ -9,16 +9,8 @@ /// <reference types="node" />

*
* This service exposes several types of comparable timestamps:
*
* * `create_time` - The time at which a document was created. Changes only
* when a document is deleted, then re-created. Increases in a strict
* monotonic fashion.
* * `update_time` - The time at which a document was last updated. Changes
* every time a document is modified. Does not change when a write results
* in no modifications. Increases in a strict monotonic fashion.
* * `read_time` - The time at which a particular state was observed. Used
* to denote a consistent snapshot of the database or the time at which a
* Document was observed to not exist.
* * `commit_time` - The time at which the writes in a transaction were
* committed. Any read with an equal or greater `read_time` is guaranteed
* to see the effects of the transaction.
* Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL
* document database that simplifies storing, syncing, and querying data for
* your mobile, web, and IoT apps at global scale. Its client libraries provide
* live synchronization and offline support, while its security features and
* integrations with Firebase and Google Cloud Platform (GCP) accelerate
* building truly serverless apps.
* @class

@@ -119,5 +111,2 @@ * @deprecated Use v1/firestore_client instead.

getDocument(request: protos.google.firestore.v1beta1.IGetDocumentRequest, callback: Callback<protos.google.firestore.v1beta1.IDocument, protos.google.firestore.v1beta1.IGetDocumentRequest | null | undefined, {} | null | undefined>): void;
createDocument(request: protos.google.firestore.v1beta1.ICreateDocumentRequest, options?: CallOptions): Promise<[protos.google.firestore.v1beta1.IDocument, protos.google.firestore.v1beta1.ICreateDocumentRequest | undefined, {} | undefined]>;
createDocument(request: protos.google.firestore.v1beta1.ICreateDocumentRequest, options: CallOptions, callback: Callback<protos.google.firestore.v1beta1.IDocument, protos.google.firestore.v1beta1.ICreateDocumentRequest | null | undefined, {} | null | undefined>): void;
createDocument(request: protos.google.firestore.v1beta1.ICreateDocumentRequest, callback: Callback<protos.google.firestore.v1beta1.IDocument, protos.google.firestore.v1beta1.ICreateDocumentRequest | null | undefined, {} | null | undefined>): void;
updateDocument(request: protos.google.firestore.v1beta1.IUpdateDocumentRequest, options?: CallOptions): Promise<[protos.google.firestore.v1beta1.IDocument, protos.google.firestore.v1beta1.IUpdateDocumentRequest | undefined, {} | undefined]>;

@@ -138,2 +127,8 @@ updateDocument(request: protos.google.firestore.v1beta1.IUpdateDocumentRequest, options: CallOptions, callback: Callback<protos.google.firestore.v1beta1.IDocument, protos.google.firestore.v1beta1.IUpdateDocumentRequest | null | undefined, {} | null | undefined>): void;

rollback(request: protos.google.firestore.v1beta1.IRollbackRequest, callback: Callback<protos.google.protobuf.IEmpty, protos.google.firestore.v1beta1.IRollbackRequest | null | undefined, {} | null | undefined>): void;
batchWrite(request: protos.google.firestore.v1beta1.IBatchWriteRequest, options?: CallOptions): Promise<[protos.google.firestore.v1beta1.IBatchWriteResponse, protos.google.firestore.v1beta1.IBatchWriteRequest | undefined, {} | undefined]>;
batchWrite(request: protos.google.firestore.v1beta1.IBatchWriteRequest, options: CallOptions, callback: Callback<protos.google.firestore.v1beta1.IBatchWriteResponse, protos.google.firestore.v1beta1.IBatchWriteRequest | null | undefined, {} | null | undefined>): void;
batchWrite(request: protos.google.firestore.v1beta1.IBatchWriteRequest, callback: Callback<protos.google.firestore.v1beta1.IBatchWriteResponse, protos.google.firestore.v1beta1.IBatchWriteRequest | null | undefined, {} | null | undefined>): void;
createDocument(request: protos.google.firestore.v1beta1.ICreateDocumentRequest, options?: CallOptions): Promise<[protos.google.firestore.v1beta1.IDocument, protos.google.firestore.v1beta1.ICreateDocumentRequest | undefined, {} | undefined]>;
createDocument(request: protos.google.firestore.v1beta1.ICreateDocumentRequest, options: CallOptions, callback: Callback<protos.google.firestore.v1beta1.IDocument, protos.google.firestore.v1beta1.ICreateDocumentRequest | null | undefined, {} | null | undefined>): void;
createDocument(request: protos.google.firestore.v1beta1.ICreateDocumentRequest, callback: Callback<protos.google.firestore.v1beta1.IDocument, protos.google.firestore.v1beta1.ICreateDocumentRequest | null | undefined, {} | null | undefined>): void;
/**

@@ -169,3 +164,3 @@ * Gets multiple documents.

* Reads documents as they were at the given time.
* This may not be older than 60 seconds.
* This may not be older than 270 seconds.
* @param {object} [options]

@@ -207,3 +202,3 @@ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.

* Reads documents as they were at the given time.
* This may not be older than 60 seconds.
* This may not be older than 270 seconds.
* @param {object} [options]

@@ -294,3 +289,3 @@ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.

* Reads documents as they were at the given time.
* This may not be older than 60 seconds.
* This may not be older than 270 seconds.
* @param {boolean} request.showMissing

@@ -348,3 +343,3 @@ * If the list should show missing documents. A missing document is a

* Reads documents as they were at the given time.
* This may not be older than 60 seconds.
* This may not be older than 270 seconds.
* @param {boolean} request.showMissing

@@ -375,2 +370,127 @@ * If the list should show missing documents. A missing document is a

listDocumentsAsync(request?: protos.google.firestore.v1beta1.IListDocumentsRequest, options?: CallOptions): AsyncIterable<protos.google.firestore.v1beta1.IDocument>;
partitionQuery(request: protos.google.firestore.v1beta1.IPartitionQueryRequest, options?: CallOptions): Promise<[protos.google.firestore.v1beta1.ICursor[], protos.google.firestore.v1beta1.IPartitionQueryRequest | null, protos.google.firestore.v1beta1.IPartitionQueryResponse]>;
partitionQuery(request: protos.google.firestore.v1beta1.IPartitionQueryRequest, options: CallOptions, callback: PaginationCallback<protos.google.firestore.v1beta1.IPartitionQueryRequest, protos.google.firestore.v1beta1.IPartitionQueryResponse | null | undefined, protos.google.firestore.v1beta1.ICursor>): void;
partitionQuery(request: protos.google.firestore.v1beta1.IPartitionQueryRequest, callback: PaginationCallback<protos.google.firestore.v1beta1.IPartitionQueryRequest, protos.google.firestore.v1beta1.IPartitionQueryResponse | null | undefined, protos.google.firestore.v1beta1.ICursor>): void;
/**
* Equivalent to `method.name.toCamelCase()`, but returns a NodeJS Stream object.
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent resource name. In the format:
* `projects/{project_id}/databases/{database_id}/documents`.
* Document resource names are not supported; only database resource names
* can be specified.
* @param {google.firestore.v1beta1.StructuredQuery} request.structuredQuery
* A structured query.
* Query must specify collection with all descendants and be ordered by name
* ascending. Other filters, order bys, limits, offsets, and start/end
* cursors are not supported.
* @param {number} request.partitionCount
* The desired maximum number of partition points.
* The partitions may be returned across multiple pages of results.
* The number must be positive. The actual number of partitions
* returned may be fewer.
*
* For example, this may be set to one fewer than the number of parallel
* queries to be run, or in running a data pipeline job, one fewer than the
* number of workers or compute instances available.
* @param {string} request.pageToken
* The `next_page_token` value returned from a previous call to
* PartitionQuery that may be used to get an additional set of results.
* There are no ordering guarantees between sets of results. Thus, using
* multiple sets of results will require merging the different result sets.
*
* For example, two subsequent calls using a page_token may return:
*
* * cursor B, cursor M, cursor Q
* * cursor A, cursor U, cursor W
*
* To obtain a complete result set ordered with respect to the results of the
* query supplied to PartitionQuery, the results sets should be merged:
* cursor A, cursor B, cursor M, cursor Q, cursor U, cursor W
* @param {number} request.pageSize
* The maximum number of partitions to return in this call, subject to
* `partition_count`.
*
* For example, if `partition_count` = 10 and `page_size` = 8, the first call
* to PartitionQuery will return up to 8 partitions and a `next_page_token`
* if more results exist. A second call to PartitionQuery will return up to
* 2 partitions, to complete the total of 10 specified in `partition_count`.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Stream}
* An object stream which emits an object representing [Cursor]{@link google.firestore.v1beta1.Cursor} on 'data' event.
* The client library will perform auto-pagination by default: it will call the API as many
* times as needed. Note that it can affect your quota.
* We recommend using `partitionQueryAsync()`
* method described below for async iteration which you can stop as needed.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination)
* for more details and examples.
*/
partitionQueryStream(request?: protos.google.firestore.v1beta1.IPartitionQueryRequest, options?: CallOptions): Transform;
/**
* Equivalent to `partitionQuery`, but returns an iterable object.
*
* `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand.
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent resource name. In the format:
* `projects/{project_id}/databases/{database_id}/documents`.
* Document resource names are not supported; only database resource names
* can be specified.
* @param {google.firestore.v1beta1.StructuredQuery} request.structuredQuery
* A structured query.
* Query must specify collection with all descendants and be ordered by name
* ascending. Other filters, order bys, limits, offsets, and start/end
* cursors are not supported.
* @param {number} request.partitionCount
* The desired maximum number of partition points.
* The partitions may be returned across multiple pages of results.
* The number must be positive. The actual number of partitions
* returned may be fewer.
*
* For example, this may be set to one fewer than the number of parallel
* queries to be run, or in running a data pipeline job, one fewer than the
* number of workers or compute instances available.
* @param {string} request.pageToken
* The `next_page_token` value returned from a previous call to
* PartitionQuery that may be used to get an additional set of results.
* There are no ordering guarantees between sets of results. Thus, using
* multiple sets of results will require merging the different result sets.
*
* For example, two subsequent calls using a page_token may return:
*
* * cursor B, cursor M, cursor Q
* * cursor A, cursor U, cursor W
*
* To obtain a complete result set ordered with respect to the results of the
* query supplied to PartitionQuery, the results sets should be merged:
* cursor A, cursor B, cursor M, cursor Q, cursor U, cursor W
* @param {number} request.pageSize
* The maximum number of partitions to return in this call, subject to
* `partition_count`.
*
* For example, if `partition_count` = 10 and `page_size` = 8, the first call
* to PartitionQuery will return up to 8 partitions and a `next_page_token`
* if more results exist. A second call to PartitionQuery will return up to
* 2 partitions, to complete the total of 10 specified in `partition_count`.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Object}
* An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols).
* When you iterate the returned iterable, each element will be an object representing
* [Cursor]{@link google.firestore.v1beta1.Cursor}. The API will be called under the hood as needed, once per the page,
* so you can stop the iteration when you don't need more results.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination)
* for more details and examples.
* @example
* const iterable = client.partitionQueryAsync(request);
* for await (const response of iterable) {
* // process response
* }
*/
partitionQueryAsync(request?: protos.google.firestore.v1beta1.IPartitionQueryRequest, options?: CallOptions): AsyncIterable<protos.google.firestore.v1beta1.ICursor>;
listCollectionIds(request: protos.google.firestore.v1beta1.IListCollectionIdsRequest, options?: CallOptions): Promise<[string[], protos.google.firestore.v1beta1.IListCollectionIdsRequest | null, protos.google.firestore.v1beta1.IListCollectionIdsResponse]>;

@@ -377,0 +497,0 @@ listCollectionIds(request: protos.google.firestore.v1beta1.IListCollectionIdsRequest, options: CallOptions, callback: PaginationCallback<protos.google.firestore.v1beta1.IListCollectionIdsRequest, protos.google.firestore.v1beta1.IListCollectionIdsResponse | null | undefined, string>): void;

@@ -18,7 +18,2 @@ {

},
"CreateDocument": {
"methods": [
"createDocument"
]
},
"UpdateDocument": {

@@ -49,2 +44,12 @@ "methods": [

},
"BatchWrite": {
"methods": [
"batchWrite"
]
},
"CreateDocument": {
"methods": [
"createDocument"
]
},
"BatchGetDocuments": {

@@ -77,2 +82,9 @@ "methods": [

},
"PartitionQuery": {
"methods": [
"partitionQuery",
"partitionQueryStream",
"partitionQueryAsync"
]
},
"ListCollectionIds": {

@@ -95,7 +107,2 @@ "methods": [

},
"CreateDocument": {
"methods": [
"createDocument"
]
},
"UpdateDocument": {

@@ -126,2 +133,12 @@ "methods": [

},
"BatchWrite": {
"methods": [
"batchWrite"
]
},
"CreateDocument": {
"methods": [
"createDocument"
]
},
"ListDocuments": {

@@ -134,2 +151,9 @@ "methods": [

},
"PartitionQuery": {
"methods": [
"partitionQuery",
"partitionQueryStream",
"partitionQueryAsync"
]
},
"ListCollectionIds": {

@@ -136,0 +160,0 @@ "methods": [

@@ -7,2 +7,14 @@ # Changelog

## [4.10.0](https://www.github.com/googleapis/nodejs-firestore/compare/v4.9.9...v4.10.0) (2021-04-15)
### Features
* add buffering layer to BulkWriter ([#1470](https://www.github.com/googleapis/nodejs-firestore/issues/1470)) ([9cc9548](https://www.github.com/googleapis/nodejs-firestore/commit/9cc954849c74199f01e52b24fc7ba045d5b56be4))
### Bug Fixes
* use BigInt when calculating nanos in Timestamp.fromMillis() ([#1468](https://www.github.com/googleapis/nodejs-firestore/issues/1468)) ([cf1949f](https://www.github.com/googleapis/nodejs-firestore/commit/cf1949f99f840d1e34edfa31a223418abdf48372))
### [4.9.9](https://www.github.com/googleapis/nodejs-firestore/compare/v4.9.8...v4.9.9) (2021-04-07)

@@ -9,0 +21,0 @@

{
"name": "@google-cloud/firestore",
"description": "Firestore Client Library for Node.js",
"version": "4.9.9",
"version": "4.10.0-beta.0",
"license": "Apache-2.0",

@@ -68,3 +68,3 @@ "author": "Google Inc.",

"@types/node": "^12.12.17",
"@types/sinon": "^9.0.0",
"@types/sinon": "^10.0.0",
"@types/through2": "^2.0.34",

@@ -71,0 +71,0 @@ "c8": "^7.0.0",

@@ -31,16 +31,8 @@ /*!

*
* This service exposes several types of comparable timestamps:
*
* * `create_time` - The time at which a document was created. Changes only
* when a document is deleted, then re-created. Increases in a strict
* monotonic fashion.
* * `update_time` - The time at which a document was last updated. Changes
* every time a document is modified. Does not change when a write results
* in no modifications. Increases in a strict monotonic fashion.
* * `read_time` - The time at which a particular state was observed. Used
* to denote a consistent snapshot of the database or the time at which a
* Document was observed to not exist.
* * `commit_time` - The time at which the writes in a transaction were
* committed. Any read with an equal or greater `read_time` is guaranteed
* to see the effects of the transaction.
* Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL
* document database that simplifies storing, syncing, and querying data for
* your mobile, web, and IoT apps at global scale. Its client libraries provide
* live synchronization and offline support, while its security features and
* integrations with Firebase and Google Cloud Platform (GCP) accelerate
* building truly serverless apps.
* @class

@@ -165,29 +157,2 @@ * @deprecated Use v1/firestore_client instead.

): void;
createDocument(
request: protos.google.firestore.v1beta1.ICreateDocumentRequest,
options?: CallOptions
): Promise<
[
protos.google.firestore.v1beta1.IDocument,
protos.google.firestore.v1beta1.ICreateDocumentRequest | undefined,
{} | undefined
]
>;
createDocument(
request: protos.google.firestore.v1beta1.ICreateDocumentRequest,
options: CallOptions,
callback: Callback<
protos.google.firestore.v1beta1.IDocument,
protos.google.firestore.v1beta1.ICreateDocumentRequest | null | undefined,
{} | null | undefined
>
): void;
createDocument(
request: protos.google.firestore.v1beta1.ICreateDocumentRequest,
callback: Callback<
protos.google.firestore.v1beta1.IDocument,
protos.google.firestore.v1beta1.ICreateDocumentRequest | null | undefined,
{} | null | undefined
>
): void;
updateDocument(

@@ -332,2 +297,56 @@ request: protos.google.firestore.v1beta1.IUpdateDocumentRequest,

): void;
batchWrite(
request: protos.google.firestore.v1beta1.IBatchWriteRequest,
options?: CallOptions
): Promise<
[
protos.google.firestore.v1beta1.IBatchWriteResponse,
protos.google.firestore.v1beta1.IBatchWriteRequest | undefined,
{} | undefined
]
>;
batchWrite(
request: protos.google.firestore.v1beta1.IBatchWriteRequest,
options: CallOptions,
callback: Callback<
protos.google.firestore.v1beta1.IBatchWriteResponse,
protos.google.firestore.v1beta1.IBatchWriteRequest | null | undefined,
{} | null | undefined
>
): void;
batchWrite(
request: protos.google.firestore.v1beta1.IBatchWriteRequest,
callback: Callback<
protos.google.firestore.v1beta1.IBatchWriteResponse,
protos.google.firestore.v1beta1.IBatchWriteRequest | null | undefined,
{} | null | undefined
>
): void;
createDocument(
request: protos.google.firestore.v1beta1.ICreateDocumentRequest,
options?: CallOptions
): Promise<
[
protos.google.firestore.v1beta1.IDocument,
protos.google.firestore.v1beta1.ICreateDocumentRequest | undefined,
{} | undefined
]
>;
createDocument(
request: protos.google.firestore.v1beta1.ICreateDocumentRequest,
options: CallOptions,
callback: Callback<
protos.google.firestore.v1beta1.IDocument,
protos.google.firestore.v1beta1.ICreateDocumentRequest | null | undefined,
{} | null | undefined
>
): void;
createDocument(
request: protos.google.firestore.v1beta1.ICreateDocumentRequest,
callback: Callback<
protos.google.firestore.v1beta1.IDocument,
protos.google.firestore.v1beta1.ICreateDocumentRequest | null | undefined,
{} | null | undefined
>
): void;
/**

@@ -363,3 +382,3 @@ * Gets multiple documents.

* Reads documents as they were at the given time.
* This may not be older than 60 seconds.
* This may not be older than 270 seconds.
* @param {object} [options]

@@ -404,3 +423,3 @@ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.

* Reads documents as they were at the given time.
* This may not be older than 60 seconds.
* This may not be older than 270 seconds.
* @param {object} [options]

@@ -518,3 +537,3 @@ * Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.

* Reads documents as they were at the given time.
* This may not be older than 60 seconds.
* This may not be older than 270 seconds.
* @param {boolean} request.showMissing

@@ -575,3 +594,3 @@ * If the list should show missing documents. A missing document is a

* Reads documents as they were at the given time.
* This may not be older than 60 seconds.
* This may not be older than 270 seconds.
* @param {boolean} request.showMissing

@@ -605,2 +624,161 @@ * If the list should show missing documents. A missing document is a

): AsyncIterable<protos.google.firestore.v1beta1.IDocument>;
partitionQuery(
request: protos.google.firestore.v1beta1.IPartitionQueryRequest,
options?: CallOptions
): Promise<
[
protos.google.firestore.v1beta1.ICursor[],
protos.google.firestore.v1beta1.IPartitionQueryRequest | null,
protos.google.firestore.v1beta1.IPartitionQueryResponse
]
>;
partitionQuery(
request: protos.google.firestore.v1beta1.IPartitionQueryRequest,
options: CallOptions,
callback: PaginationCallback<
protos.google.firestore.v1beta1.IPartitionQueryRequest,
| protos.google.firestore.v1beta1.IPartitionQueryResponse
| null
| undefined,
protos.google.firestore.v1beta1.ICursor
>
): void;
partitionQuery(
request: protos.google.firestore.v1beta1.IPartitionQueryRequest,
callback: PaginationCallback<
protos.google.firestore.v1beta1.IPartitionQueryRequest,
| protos.google.firestore.v1beta1.IPartitionQueryResponse
| null
| undefined,
protos.google.firestore.v1beta1.ICursor
>
): void;
/**
 * Equivalent to `partitionQuery`, but returns a NodeJS Stream object.
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent resource name. In the format:
* `projects/{project_id}/databases/{database_id}/documents`.
* Document resource names are not supported; only database resource names
* can be specified.
* @param {google.firestore.v1beta1.StructuredQuery} request.structuredQuery
* A structured query.
* Query must specify collection with all descendants and be ordered by name
* ascending. Other filters, order bys, limits, offsets, and start/end
* cursors are not supported.
* @param {number} request.partitionCount
* The desired maximum number of partition points.
* The partitions may be returned across multiple pages of results.
* The number must be positive. The actual number of partitions
* returned may be fewer.
*
* For example, this may be set to one fewer than the number of parallel
* queries to be run, or in running a data pipeline job, one fewer than the
* number of workers or compute instances available.
* @param {string} request.pageToken
* The `next_page_token` value returned from a previous call to
* PartitionQuery that may be used to get an additional set of results.
* There are no ordering guarantees between sets of results. Thus, using
* multiple sets of results will require merging the different result sets.
*
* For example, two subsequent calls using a page_token may return:
*
* * cursor B, cursor M, cursor Q
* * cursor A, cursor U, cursor W
*
* To obtain a complete result set ordered with respect to the results of the
 * query supplied to PartitionQuery, the result sets should be merged:
* cursor A, cursor B, cursor M, cursor Q, cursor U, cursor W
* @param {number} request.pageSize
* The maximum number of partitions to return in this call, subject to
* `partition_count`.
*
* For example, if `partition_count` = 10 and `page_size` = 8, the first call
* to PartitionQuery will return up to 8 partitions and a `next_page_token`
* if more results exist. A second call to PartitionQuery will return up to
* 2 partitions, to complete the total of 10 specified in `partition_count`.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Stream}
* An object stream which emits an object representing [Cursor]{@link google.firestore.v1beta1.Cursor} on 'data' event.
* The client library will perform auto-pagination by default: it will call the API as many
* times as needed. Note that it can affect your quota.
* We recommend using `partitionQueryAsync()`
* method described below for async iteration which you can stop as needed.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination)
* for more details and examples.
*/
partitionQueryStream(
request?: protos.google.firestore.v1beta1.IPartitionQueryRequest,
options?: CallOptions
): Transform;
/**
* Equivalent to `partitionQuery`, but returns an iterable object.
*
* `for`-`await`-`of` syntax is used with the iterable to get response elements on-demand.
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent resource name. In the format:
* `projects/{project_id}/databases/{database_id}/documents`.
* Document resource names are not supported; only database resource names
* can be specified.
* @param {google.firestore.v1beta1.StructuredQuery} request.structuredQuery
* A structured query.
* Query must specify collection with all descendants and be ordered by name
* ascending. Other filters, order bys, limits, offsets, and start/end
* cursors are not supported.
* @param {number} request.partitionCount
* The desired maximum number of partition points.
* The partitions may be returned across multiple pages of results.
* The number must be positive. The actual number of partitions
* returned may be fewer.
*
* For example, this may be set to one fewer than the number of parallel
* queries to be run, or in running a data pipeline job, one fewer than the
* number of workers or compute instances available.
* @param {string} request.pageToken
* The `next_page_token` value returned from a previous call to
* PartitionQuery that may be used to get an additional set of results.
* There are no ordering guarantees between sets of results. Thus, using
* multiple sets of results will require merging the different result sets.
*
* For example, two subsequent calls using a page_token may return:
*
* * cursor B, cursor M, cursor Q
* * cursor A, cursor U, cursor W
*
* To obtain a complete result set ordered with respect to the results of the
 * query supplied to PartitionQuery, the result sets should be merged:
* cursor A, cursor B, cursor M, cursor Q, cursor U, cursor W
* @param {number} request.pageSize
* The maximum number of partitions to return in this call, subject to
* `partition_count`.
*
* For example, if `partition_count` = 10 and `page_size` = 8, the first call
* to PartitionQuery will return up to 8 partitions and a `next_page_token`
* if more results exist. A second call to PartitionQuery will return up to
* 2 partitions, to complete the total of 10 specified in `partition_count`.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Object}
* An iterable Object that allows [async iteration](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols).
* When you iterate the returned iterable, each element will be an object representing
* [Cursor]{@link google.firestore.v1beta1.Cursor}. The API will be called under the hood as needed, once per the page,
* so you can stop the iteration when you don't need more results.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination)
* for more details and examples.
* @example
* const iterable = client.partitionQueryAsync(request);
* for await (const response of iterable) {
* // process response
* }
*/
partitionQueryAsync(
request?: protos.google.firestore.v1beta1.IPartitionQueryRequest,
options?: CallOptions
): AsyncIterable<protos.google.firestore.v1beta1.ICursor>;
listCollectionIds(

@@ -607,0 +785,0 @@ request: protos.google.firestore.v1beta1.IListCollectionIdsRequest,

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc