@google-cloud/firestore
Comparing version 4.2.0 to 4.4.0
@@ -18,4 +18,12 @@ /*!
 import { FieldPath, Firestore } from '.';
+import { RateLimiter } from './rate-limiter';
 import { Timestamp } from './timestamp';
 import { WriteResult } from './write-batch';
+/*!
+ * The starting maximum number of operations per second as allowed by the
+ * 500/50/5 rule.
+ *
+ * https://cloud.google.com/datastore/docs/best-practices#ramping_up_traffic.
+ */
+export declare const DEFAULT_STARTING_MAXIMUM_OPS_PER_SECOND = 500;
 /**
@@ -46,3 +54,3 @@ * A Firestore BulkWriter that can be used to perform a large number of writes
     private rateLimiter;
-    constructor(firestore: Firestore, enableThrottling: boolean);
+    constructor(firestore: Firestore, options?: firestore.BulkWriterOptions);
     /**
@@ -229,15 +237,13 @@ * Create a document with the provided data. This single operation will fail
     /**
-     * Checks that the provided batch is sendable. To be sendable, a batch must:
-     * (1) be marked as READY_TO_SEND
-     * (2) not write to references that are currently in flight
+     * Sets the maximum number of allowed operations in a batch.
      *
      * @private
      */
-    private isBatchSendable;
+    _setMaxBatchSize(size: number): void;
     /**
-     * Sets the maximum number of allowed operations in a batch.
+     * Returns the rate limiter for testing.
      *
      * @private
      */
-    _setMaxBatchSize(size: number): void;
+    _getRateLimiter(): RateLimiter;
 }
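The new `options` parameter replaces the old `enableThrottling` flag. Based on the constructor signature above and the `validateBulkWriterOptions()` checks shown later in this diff, configuring a BulkWriter now looks roughly like this (a sketch; the option names are taken from this diff, the rate values are illustrative):

```ts
import { Firestore } from '@google-cloud/firestore';

const firestore = new Firestore();

// Ramp up from 500 ops/sec (the 500/50/5 default) toward a hard ceiling.
const writer = firestore.bulkWriter({
  throttling: {
    initialOpsPerSecond: 500, // validated as an integer >= 1
    maxOpsPerSecond: 2000,    // must not be less than initialOpsPerSecond
  },
});

// Or opt out of client-side throttling entirely.
const unthrottled = firestore.bulkWriter({ throttling: false });
```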
@@ -10,2 +10,3 @@ "use strict";
 const logger_1 = require("./logger");
+const validate_1 = require("./validate");
 /*!
@@ -21,3 +22,3 @@ * The maximum number of writes that can be in a single batch.
 */
-const STARTING_MAXIMUM_OPS_PER_SECOND = 500;
+exports.DEFAULT_STARTING_MAXIMUM_OPS_PER_SECOND = 500;
/*!
@@ -65,5 +66,5 @@ * The rate by which to increase the capacity as specified by the 500/50/5 rule.
         this.completedDeferred = new util_1.Deferred();
-        // A map from each write's document path to its corresponding result.
-        // Only contains writes that have not been resolved.
-        this.pendingOps = new Map();
+        // An array of pending write operations. Only contains writes that have not
+        // been resolved.
+        this.pendingOps = [];
         this.backoff = new backoff_1.ExponentialBackoff();
@@ -75,3 +76,3 @@ }
     get opCount() {
-        return this.pendingOps.size;
+        return this.pendingOps.length;
     }
@@ -115,6 +116,9 @@ /**
     processOperation(documentRef) {
-        assert(!this.pendingOps.has(documentRef.path), 'Batch should not contain writes to the same document');
         assert(this.state === BatchState.OPEN, 'Batch should be OPEN when adding writes');
         const deferred = new util_1.Deferred();
-        this.pendingOps.set(documentRef.path, deferred);
+        this.pendingOps.push({
+            writeBatchIndex: this.opCount,
+            key: documentRef.path,
+            deferred: deferred,
+        });
         if (this.opCount === this.maxBatchSize) {
@@ -152,13 +156,11 @@ this.state = BatchState.READY_TO_SEND;
             // Map the failure to each individual write's result.
-            results = [...this.pendingOps.keys()].map(path => {
-                return { key: path, writeTime: null, status: util_1.wrapError(err, stack) };
+            results = this.pendingOps.map(op => {
+                return { key: op.key, writeTime: null, status: util_1.wrapError(err, stack) };
             });
         }
-        this.processResults(results);
-        if (this.pendingOps.size > 0) {
+        this.processResults(results, /* allowRetry= */ true);
+        if (this.pendingOps.length > 0) {
             logger_1.logger('BulkWriter.bulkCommit', null, `Current batch failed at retry #${attempt}. Num failures: ` +
-                `${this.pendingOps.size}.`);
-            this.writeBatch = new write_batch_1.WriteBatch(this.firestore, this.writeBatch, [
-                ...this.pendingOps.keys(),
-            ]);
+                `${this.pendingOps.length}.`);
+            this.writeBatch = new write_batch_1.WriteBatch(this.firestore, this.writeBatch, new Set(this.pendingOps.map(op => op.writeBatchIndex)));
         }
@@ -170,3 +172,3 @@ else {
         }
-        this.failRemainingOperations(results);
+        this.processResults(results);
         this.completedDeferred.resolve();
@@ -177,21 +179,26 @@ }
     */
-    processResults(results) {
-        for (const result of results) {
+    processResults(results, allowRetry = false) {
+        const newPendingOps = [];
+        for (let i = 0; i < results.length; i++) {
+            const result = results[i];
+            const op = this.pendingOps[i];
             if (result.status.code === google_gax_1.Status.OK) {
-                this.pendingOps.get(result.key).resolve(result);
-                this.pendingOps.delete(result.key);
+                op.deferred.resolve(result);
             }
-            else if (!this.shouldRetry(result.status.code)) {
-                this.pendingOps.get(result.key).reject(result.status);
-                this.pendingOps.delete(result.key);
+            else if (!allowRetry || !this.shouldRetry(result.status.code)) {
+                op.deferred.reject(result.status);
             }
+            else {
+                // Retry the operation if it has not been processed.
+                // Store the current index of pendingOps to preserve the mapping of
+                // this operation's index in the underlying WriteBatch.
+                newPendingOps.push({
+                    writeBatchIndex: i,
+                    key: op.key,
+                    deferred: op.deferred,
+                });
+            }
         }
+        this.pendingOps = newPendingOps;
     }
-    failRemainingOperations(results) {
-        for (const result of results) {
-            assert(result.status.code !== google_gax_1.Status.OK, 'Should not fail successful operation');
-            this.pendingOps.get(result.key).reject(result.status);
-            this.pendingOps.delete(result.key);
-        }
-    }
     shouldRetry(code) {
@@ -201,12 +208,2 @@ const retryCodes = util_1.getRetryCodes('batchWrite');
     }
-    hasPath(path) {
-        for (const [docPath] of this.pendingOps) {
-            if (docPath === path)
-                return true;
-        }
-        return false;
-    }
-    docPaths() {
-        return this.pendingOps.keys();
-    }
     /**
@@ -233,3 +230,4 @@ * Returns a promise that resolves when the batch has been sent, and a
 class BulkWriter {
-    constructor(firestore, enableThrottling) {
+    constructor(firestore, options) {
+        var _a, _b;
         this.firestore = firestore;
@@ -250,7 +248,30 @@ /**
         this.firestore._incrementBulkWritersCount();
-        if (enableThrottling) {
-            this.rateLimiter = new rate_limiter_1.RateLimiter(STARTING_MAXIMUM_OPS_PER_SECOND, RATE_LIMITER_MULTIPLIER, RATE_LIMITER_MULTIPLIER_MILLIS);
+        validateBulkWriterOptions(options);
+        if ((options === null || options === void 0 ? void 0 : options.throttling) === false) {
+            this.rateLimiter = new rate_limiter_1.RateLimiter(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY);
         }
         else {
-            this.rateLimiter = new rate_limiter_1.RateLimiter(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY);
+            let startingRate = exports.DEFAULT_STARTING_MAXIMUM_OPS_PER_SECOND;
+            let maxRate = Number.POSITIVE_INFINITY;
+            if (typeof (options === null || options === void 0 ? void 0 : options.throttling) !== 'boolean') {
+                if (((_a = options === null || options === void 0 ? void 0 : options.throttling) === null || _a === void 0 ? void 0 : _a.maxOpsPerSecond) !== undefined) {
+                    maxRate = options.throttling.maxOpsPerSecond;
+                }
+                if (((_b = options === null || options === void 0 ? void 0 : options.throttling) === null || _b === void 0 ? void 0 : _b.initialOpsPerSecond) !== undefined) {
+                    startingRate = options.throttling.initialOpsPerSecond;
+                }
+                // The initial validation step ensures that the maxOpsPerSecond is
+                // greater than initialOpsPerSecond. If this inequality is true, that
+                // means initialOpsPerSecond was not set and maxOpsPerSecond is less
+                // than the default starting rate.
+                if (maxRate < startingRate) {
+                    startingRate = maxRate;
+                }
+                // Ensure that the batch size is not larger than the number of allowed
+                // operations per second.
+                if (startingRate < this.maxBatchSize) {
+                    this.maxBatchSize = startingRate;
+                }
+            }
+            this.rateLimiter = new rate_limiter_1.RateLimiter(startingRate, RATE_LIMITER_MULTIPLIER, RATE_LIMITER_MULTIPLIER_MILLIS, maxRate);
         }
@@ -284,3 +305,3 @@ }
         this.verifyNotClosed();
-        const bulkCommitBatch = this.getEligibleBatch(documentRef);
+        const bulkCommitBatch = this.getEligibleBatch();
         const resultPromise = bulkCommitBatch.create(documentRef, data);
@@ -320,3 +341,3 @@ this.sendReadyBatches();
         this.verifyNotClosed();
-        const bulkCommitBatch = this.getEligibleBatch(documentRef);
+        const bulkCommitBatch = this.getEligibleBatch();
         const resultPromise = bulkCommitBatch.delete(documentRef, precondition);
@@ -362,3 +383,3 @@ this.sendReadyBatches();
         this.verifyNotClosed();
-        const bulkCommitBatch = this.getEligibleBatch(documentRef);
+        const bulkCommitBatch = this.getEligibleBatch();
         const resultPromise = bulkCommitBatch.set(documentRef, data, options);
@@ -411,3 +432,3 @@ this.sendReadyBatches();
         this.verifyNotClosed();
-        const bulkCommitBatch = this.getEligibleBatch(documentRef);
+        const bulkCommitBatch = this.getEligibleBatch();
         const resultPromise = bulkCommitBatch.update(documentRef, dataOrField, ...preconditionOrValues);
@@ -489,6 +510,6 @@ this.sendReadyBatches();
     */
-    getEligibleBatch(ref) {
+    getEligibleBatch() {
         if (this.batchQueue.length > 0) {
             const lastBatch = this.batchQueue[this.batchQueue.length - 1];
-            if (lastBatch.state === BatchState.OPEN && !lastBatch.hasPath(ref.path)) {
+            if (lastBatch.state === BatchState.OPEN) {
                 return lastBatch;
@@ -526,3 +547,3 @@ }
         while (index < unsentBatches.length &&
-            this.isBatchSendable(unsentBatches[index])) {
+            unsentBatches[index].state === BatchState.READY_TO_SEND) {
             const batch = unsentBatches[index];
@@ -561,27 +582,12 @@ // Send the batch if it is under the rate limit, or schedule another
     /**
-     * Checks that the provided batch is sendable. To be sendable, a batch must:
-     * (1) be marked as READY_TO_SEND
-     * (2) not write to references that are currently in flight
+     * Sets the maximum number of allowed operations in a batch.
      *
      * @private
      */
-    isBatchSendable(batch) {
-        if (batch.state !== BatchState.READY_TO_SEND) {
-            return false;
-        }
-        for (const path of batch.docPaths()) {
-            const isRefInFlight = this.batchQueue
-                .filter(batch => batch.state === BatchState.SENT)
-                .find(batch => batch.hasPath(path)) !== undefined;
-            if (isRefInFlight) {
-                // eslint-disable-next-line no-console
-                console.warn('[BulkWriter]', `Duplicate write to document "${path}" detected.`, 'Writing to the same document multiple times will slow down BulkWriter. ' +
-                    'Write to unique documents in order to maximize throughput.');
-                return false;
-            }
-        }
-        return true;
+    // Visible for testing.
+    _setMaxBatchSize(size) {
+        this.maxBatchSize = size;
     }
     /**
-     * Sets the maximum number of allowed operations in a batch.
+     * Returns the rate limiter for testing.
      *
@@ -591,7 +597,43 @@ * @private
-    // Visible for testing.
-    _setMaxBatchSize(size) {
-        this.maxBatchSize = size;
+    _getRateLimiter() {
+        return this.rateLimiter;
     }
 }
 exports.BulkWriter = BulkWriter;
+/**
+ * Validates the use of 'value' as BulkWriterOptions.
+ *
+ * @private
+ * @param value The BulkWriterOptions object to validate.
+ * @throws if the input is not a valid BulkWriterOptions object.
+ */
+function validateBulkWriterOptions(value) {
+    if (validate_1.validateOptional(value, { optional: true })) {
+        return;
+    }
+    const argName = 'options';
+    if (!util_1.isObject(value)) {
+        throw new Error(`${validate_1.invalidArgumentMessage(argName, 'bulkWriter() options argument')} Input is not an object.`);
+    }
+    const options = value;
+    if (options.throttling === undefined ||
+        typeof options.throttling === 'boolean') {
+        return;
+    }
+    if (options.throttling.initialOpsPerSecond !== undefined) {
+        validate_1.validateInteger('initialOpsPerSecond', options.throttling.initialOpsPerSecond, {
+            minValue: 1,
+        });
+    }
+    if (options.throttling.maxOpsPerSecond !== undefined) {
+        validate_1.validateInteger('maxOpsPerSecond', options.throttling.maxOpsPerSecond, {
+            minValue: 1,
+        });
+        if (options.throttling.initialOpsPerSecond !== undefined &&
+            options.throttling.initialOpsPerSecond >
+                options.throttling.maxOpsPerSecond) {
+            throw new Error(`${validate_1.invalidArgumentMessage(argName, 'bulkWriter() options argument')} "maxOpsPerSecond" cannot be less than "initialOpsPerSecond".`);
+        }
+    }
+}
 //# sourceMappingURL=bulk-writer.js.map
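The pendingOps refactor above (a Map keyed by document path becomes an array of `{ writeBatchIndex, key, deferred }` entries) is what lets a retry batch be rebuilt from operation positions instead of document paths, so the same document may now appear more than once in a batch. A minimal sketch of that bookkeeping, using simplified stand-in types rather than the library's internals:

```ts
// Simplified stand-ins for the shapes used in the diff above.
interface PendingOp {
  writeBatchIndex: number; // position of this op in the underlying WriteBatch
  key: string;             // document path, kept for logging
}

// After a failed attempt, collect the positions of the ops that should be
// retried; the retry WriteBatch is then rebuilt by filtering on them.
function indexesToRetry(ops: PendingOp[], retryable: boolean[]): Set<number> {
  const indexes = new Set<number>();
  ops.forEach((op, i) => {
    if (retryable[i]) {
      indexes.add(op.writeBatchIndex);
    }
  });
  return indexes;
}

// Mirrors `new WriteBatch(firestore, previousBatch, indexes)` in the diff:
// the new batch keeps retryBatch._ops.filter((op, index) => indexes.has(index)).
```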
@@ -70,12 +70,24 @@ "use strict";
     }
-    addBundledDocument(snap) {
-        const docProto = snap.toDocumentProto();
-        this.documents.set(snap.id, {
-            document: snap.exists ? docProto : undefined,
-            metadata: {
-                name: docProto.name,
-                readTime: snap.readTime.toProto().timestampValue,
-                exists: snap.exists,
-            },
-        });
+    addBundledDocument(snap, queryName) {
+        const originalDocument = this.documents.get(snap.id);
+        const originalQueries = originalDocument === null || originalDocument === void 0 ? void 0 : originalDocument.metadata.queries;
+        // Update with document built from `snap` because it is newer.
+        if (!originalDocument ||
+            timestamp_1.Timestamp.fromProto(originalDocument.metadata.readTime) < snap.readTime) {
+            const docProto = snap.toDocumentProto();
+            this.documents.set(snap.id, {
+                document: snap.exists ? docProto : undefined,
+                metadata: {
+                    name: docProto.name,
+                    readTime: snap.readTime.toProto().timestampValue,
+                    exists: snap.exists,
+                },
+            });
+        }
+        // Update `queries` to include both original and `queryName`.
+        const newDocument = this.documents.get(snap.id);
+        newDocument.metadata.queries = originalQueries || [];
+        if (queryName) {
+            newDocument.metadata.queries.push(queryName);
+        }
         if (snap.readTime > this.latestReadTime) {
@@ -87,3 +99,3 @@ this.latestReadTime = snap.readTime;
         if (this.namedQueries.has(name)) {
-            throw new Error(`Query name conflict: ${name} is already added.`);
+            throw new Error(`Query name conflict: ${name} has already been added.`);
         }
@@ -96,3 +108,3 @@ this.namedQueries.set(name, {
         for (const snap of querySnap.docs) {
-            this.addBundledDocument(snap);
+            this.addBundledDocument(snap, name);
         }
@@ -99,0 +111,0 @@ if (querySnap.readTime > this.latestReadTime) {
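In short, `addBundledDocument()` now deduplicates documents that match multiple named queries: the newest snapshot (by read time) wins, while the names of all matching queries accumulate on the document's metadata. A small sketch of that merge rule with hypothetical simplified types:

```ts
// Sketch of the dedup rule above, with hypothetical simplified types.
interface BundledDoc {
  readTimeMillis: number; // stand-in for the proto read time
  queries: string[];      // names of queries that returned this document
}

function mergeSnapshot(
  docs: Map<string, BundledDoc>,
  id: string,
  readTimeMillis: number,
  queryName?: string,
): void {
  const existing = docs.get(id);
  // Keep whichever snapshot is newer...
  if (!existing || existing.readTimeMillis < readTimeMillis) {
    docs.set(id, { readTimeMillis, queries: existing?.queries ?? [] });
  }
  // ...but always record every query that matched the document.
  if (queryName) {
    docs.get(id)!.queries.push(queryName);
  }
}
```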
@@ -601,3 +601,3 @@ "use strict";
     bulkWriter(options) {
-        return new bulk_writer_1.BulkWriter(this, !(options === null || options === void 0 ? void 0 : options.disableThrottling));
+        return new bulk_writer_1.BulkWriter(this, options);
     }
@@ -604,0 +604,0 @@ snapshot_(documentOrName, readTime, encoding) {
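The call-site change above is the public API migration: 4.2.0's `disableThrottling` flag is gone, and the `throttling` option takes its place. A before/after sketch, assuming an existing `firestore` instance:

```ts
// v4.2.0 (old option, per the removed line above):
const oldWriter = firestore.bulkWriter({ disableThrottling: true });

// v4.4.0 equivalent:
const newWriter = firestore.bulkWriter({ throttling: false });
```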
@@ -47,3 +47,4 @@ "use strict";
 function setLogFunction(logger) {
-    validate_1.validateFunction('logger', logger);
+    if (logger !== null)
+        validate_1.validateFunction('logger', logger);
     logFunction = logger;
@@ -50,0 +51,0 @@ }
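This is the `setLogFunction(null)` fix from the changelog below: `null` now bypasses function validation and clears the log function. For example:

```ts
import { setLogFunction } from '@google-cloud/firestore';

// Route the SDK's debug output to a custom logger...
setLogFunction((msg: string) => console.debug(`[firestore] ${msg}`));

// ...and, as of this release, pass null to turn logging off again.
setLogFunction(null);
```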
@@ -265,3 +265,3 @@ /*!
 /**
- * A dot-separated path for navigating sub-objects within a document.
+ * A dot-separated path for navigating sub-objects (e.g. nested maps) within a document.
 *
@@ -268,0 +268,0 @@ * @class
@@ -411,3 +411,3 @@ "use strict";
 /**
- * A dot-separated path for navigating sub-objects within a document.
+ * A dot-separated path for navigating sub-objects (e.g. nested maps) within a document.
 *
@@ -414,0 +414,0 @@ * @class
@@ -19,5 +19,7 @@ /**
     private readonly multiplierMillis;
+    readonly maximumCapacity: number;
     private readonly startTimeMillis;
     availableTokens: number;
     lastRefillTimeMillis: number;
+    previousCapacity: number;
     /**
@@ -28,6 +30,8 @@ * @param initialCapacity Initial maximum number of operations per second.
     * milliseconds.
+     * @param maximumCapacity Maximum number of allowed operations per second.
+     * The number of tokens added per second will never exceed this number.
     * @param startTimeMillis The starting time in epoch milliseconds that the
     * rate limit is based on. Used for testing the limiter.
     */
-    constructor(initialCapacity: number, multiplier: number, multiplierMillis: number, startTimeMillis?: number);
+    constructor(initialCapacity: number, multiplier: number, multiplierMillis: number, maximumCapacity: number, startTimeMillis?: number);
     /**
@@ -34,0 +38,0 @@ * Tries to make the number of operations. Returns true if the request
@@ -19,2 +19,3 @@ "use strict";
 const assert = require("assert");
+const logger_1 = require("./logger");
 /**
@@ -40,12 +41,16 @@ * A helper that uses the Token Bucket algorithm to rate limit the number of
     * milliseconds.
+     * @param maximumCapacity Maximum number of allowed operations per second.
+     * The number of tokens added per second will never exceed this number.
     * @param startTimeMillis The starting time in epoch milliseconds that the
     * rate limit is based on. Used for testing the limiter.
     */
-    constructor(initialCapacity, multiplier, multiplierMillis, startTimeMillis = Date.now()) {
+    constructor(initialCapacity, multiplier, multiplierMillis, maximumCapacity, startTimeMillis = Date.now()) {
         this.initialCapacity = initialCapacity;
         this.multiplier = multiplier;
         this.multiplierMillis = multiplierMillis;
+        this.maximumCapacity = maximumCapacity;
         this.startTimeMillis = startTimeMillis;
         this.availableTokens = initialCapacity;
         this.lastRefillTimeMillis = startTimeMillis;
+        this.previousCapacity = initialCapacity;
     }
@@ -121,3 +126,7 @@ /**
         const millisElapsed = requestTimeMillis - this.startTimeMillis;
-        const operationsPerSecond = Math.floor(Math.pow(this.multiplier, Math.floor(millisElapsed / this.multiplierMillis)) * this.initialCapacity);
+        const operationsPerSecond = Math.min(Math.floor(Math.pow(this.multiplier, Math.floor(millisElapsed / this.multiplierMillis)) * this.initialCapacity), this.maximumCapacity);
+        if (operationsPerSecond !== this.previousCapacity) {
+            logger_1.logger('RateLimiter.calculateCapacity', null, `New request capacity: ${operationsPerSecond} operations per second.`);
+        }
+        this.previousCapacity = operationsPerSecond;
        return operationsPerSecond;
@@ -124,0 +133,0 @@ }
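The new capacity rule simply caps the 500/50/5 ramp-up: capacity = min(floor(multiplier^floor(elapsed / multiplierMillis) × initialCapacity), maximumCapacity). A standalone sketch of the same arithmetic (the 1.5×-every-5-minutes defaults are assumptions based on the rule's name, not taken from this diff):

```ts
// Standalone sketch of the capped 500/50/5 capacity calculation shown above.
function calculateCapacity(
  millisElapsed: number,
  initialCapacity = 500,
  multiplier = 1.5,              // assumed: +50% per window
  multiplierMillis = 5 * 60_000, // assumed: 5-minute window
  maximumCapacity = Number.POSITIVE_INFINITY,
): number {
  const steps = Math.floor(millisElapsed / multiplierMillis);
  return Math.min(
    Math.floor(Math.pow(multiplier, steps) * initialCapacity),
    maximumCapacity,
  );
}

// After 10 minutes: min(floor(1.5^2 * 500), 1000) = min(1125, 1000) = 1000.
console.log(calculateCapacity(10 * 60_000, 500, 1.5, 5 * 60_000, 1000));
```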
@@ -725,3 +725,3 @@ /*!
     */
-    select(...fieldPaths: Array<string | FieldPath>): Query<T>;
+    select(...fieldPaths: Array<string | FieldPath>): Query<firestore.DocumentData>;
     /**
@@ -728,0 +728,0 @@ * Creates and returns a new [Query]{@link Query} that's additionally sorted
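The return type changes because a field mask can strip fields that a `Query<T>` converter expects, so the projected query is now honestly typed as raw `DocumentData`:

```ts
import { Firestore, DocumentData, Query } from '@google-cloud/firestore';

const firestore = new Firestore();
const users = firestore.collection('users');

// A projected query only carries the masked fields, so as of this release
// select() returns Query<DocumentData> rather than Query<T>.
const namesOnly: Query<DocumentData> = users.select('name', 'age');
```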
@@ -216,5 +216,6 @@ "use strict";
         });
-        const apiCall = this._gaxModule.createApiCall(callPromise, this._defaults[methodName], this.descriptors.page[methodName] ||
-            this.descriptors.stream[methodName] ||
-            this.descriptors.longrunning[methodName]);
+        const descriptor = this.descriptors.page[methodName] ||
+            this.descriptors.longrunning[methodName] ||
+            undefined;
+        const apiCall = this._gaxModule.createApiCall(callPromise, this._defaults[methodName], descriptor);
         this.innerApiCalls[methodName] = apiCall;
@@ -221,0 +222,0 @@ }
@@ -336,8 +336,9 @@ /// <reference types="node" />
     * A structured query.
-     * Filters, order bys, limits, offsets, and start/end cursors are not
-     * supported.
+     * Query must specify collection with all descendants and be ordered by name
+     * ascending. Other filters, order bys, limits, offsets, and start/end
+     * cursors are not supported.
     * @param {number} request.partitionCount
     * The desired maximum number of partition points.
     * The partitions may be returned across multiple pages of results.
-     * The number must be strictly positive. The actual number of partitions
+     * The number must be positive. The actual number of partitions
     * returned may be fewer.
@@ -390,8 +391,9 @@ *
     * A structured query.
-     * Filters, order bys, limits, offsets, and start/end cursors are not
-     * supported.
+     * Query must specify collection with all descendants and be ordered by name
+     * ascending. Other filters, order bys, limits, offsets, and start/end
+     * cursors are not supported.
     * @param {number} request.partitionCount
     * The desired maximum number of partition points.
     * The partitions may be returned across multiple pages of results.
-     * The number must be strictly positive. The actual number of partitions
+     * The number must be positive. The actual number of partitions
     * returned may be fewer.
@@ -398,0 +400,0 @@ *
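The updated docs spell out the constraints: the structured query must select a collection with all descendants (a collection-group query) ordered by name ascending, and `partitionCount` must be positive. A hedged sketch of a request that satisfies them, using the generated v1 client; the parent path and collection id are illustrative, not from this diff:

```ts
import { v1 } from '@google-cloud/firestore';

const client = new v1.FirestoreClient();

const request = {
  parent: 'projects/my-project/databases/(default)/documents', // hypothetical
  structuredQuery: {
    // Collection with all descendants, ordered by name ascending, no filters.
    from: [{ collectionId: 'events', allDescendants: true }],
    orderBy: [{ field: { fieldPath: '__name__' }, direction: 'ASCENDING' }],
  },
  partitionCount: 16, // must be positive
};

// Returns cursors that split the query into roughly equal ranges.
client.partitionQuery(request).then(([partitions]) => {
  console.log(`Got ${partitions.length} partition cursors`);
});
```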
@@ -199,5 +199,6 @@ "use strict";
         });
-        const apiCall = this._gaxModule.createApiCall(callPromise, this._defaults[methodName], this.descriptors.page[methodName] ||
+        const descriptor = this.descriptors.page[methodName] ||
             this.descriptors.stream[methodName] ||
-            this.descriptors.longrunning[methodName]);
+            undefined;
+        const apiCall = this._gaxModule.createApiCall(callPromise, this._defaults[methodName], descriptor);
         this.innerApiCalls[methodName] = apiCall;
@@ -912,8 +913,9 @@ }
     * A structured query.
-     * Filters, order bys, limits, offsets, and start/end cursors are not
-     * supported.
+     * Query must specify collection with all descendants and be ordered by name
+     * ascending. Other filters, order bys, limits, offsets, and start/end
+     * cursors are not supported.
     * @param {number} request.partitionCount
     * The desired maximum number of partition points.
     * The partitions may be returned across multiple pages of results.
-     * The number must be strictly positive. The actual number of partitions
+     * The number must be positive. The actual number of partitions
     * returned may be fewer.
@@ -1005,8 +1007,9 @@ *
     * A structured query.
-     * Filters, order bys, limits, offsets, and start/end cursors are not
-     * supported.
+     * Query must specify collection with all descendants and be ordered by name
+     * ascending. Other filters, order bys, limits, offsets, and start/end
+     * cursors are not supported.
     * @param {number} request.partitionCount
     * The desired maximum number of partition points.
     * The partitions may be returned across multiple pages of results.
-     * The number must be strictly positive. The actual number of partitions
+     * The number must be positive. The actual number of partitions
     * returned may be fewer.
@@ -1070,8 +1073,9 @@ *
     * A structured query.
-     * Filters, order bys, limits, offsets, and start/end cursors are not
-     * supported.
+     * Query must specify collection with all descendants and be ordered by name
+     * ascending. Other filters, order bys, limits, offsets, and start/end
+     * cursors are not supported.
     * @param {number} request.partitionCount
     * The desired maximum number of partition points.
     * The partitions may be returned across multiple pages of results.
-     * The number must be strictly positive. The actual number of partitions
+     * The number must be positive. The actual number of partitions
     * returned may be fewer.
@@ -1078,0 +1082,0 @@ *
@@ -206,5 +206,6 @@ "use strict";
         });
-        const apiCall = this._gaxModule.createApiCall(callPromise, this._defaults[methodName], this.descriptors.page[methodName] ||
+        const descriptor = this.descriptors.page[methodName] ||
             this.descriptors.stream[methodName] ||
-            this.descriptors.longrunning[methodName]);
+            undefined;
+        const apiCall = this._gaxModule.createApiCall(callPromise, this._defaults[methodName], descriptor);
         this.innerApiCalls[methodName] = apiCall;
@@ -211,0 +212,0 @@ }
@@ -95,6 +95,6 @@ /*!
     * @param retryBatch The WriteBatch that needs to be retried.
-     * @param docsToRetry The documents from the provided WriteBatch that need
-     * to be retried.
+     * @param indexesToRetry The indexes of the operations from the provided
+     * WriteBatch that need to be retried.
     */
-    constructor(firestore: Firestore, retryBatch: WriteBatch, docsToRetry: string[]);
+    constructor(firestore: Firestore, retryBatch: WriteBatch, indexesToRetry: Set<number>);
     constructor(firestore: Firestore);
@@ -101,0 +101,0 @@ /**
@@ -93,3 +93,3 @@ "use strict";
 class WriteBatch {
-    constructor(firestore, retryBatch, docsToRetry) {
+    constructor(firestore, retryBatch, indexesToRetry) {
         /**
@@ -108,5 +108,5 @@ * An array of document paths and the corresponding write operations that are
         if (retryBatch) {
-            // Creates a new WriteBatch containing only the operations from the
-            // provided document paths to retry.
-            this._ops = retryBatch._ops.filter(v => docsToRetry.indexOf(v.docPath) !== -1);
+            // Creates a new WriteBatch containing only the indexes from the provided
+            // indexes to retry.
+            this._ops = retryBatch._ops.filter((op, index) => indexesToRetry.has(index));
         }
@@ -113,0 +113,0 @@ }
@@ -7,2 +7,26 @@ # Changelog
+## [4.4.0](https://www.github.com/googleapis/nodejs-firestore/compare/v4.3.0...v4.4.0) (2020-09-29)
+
+### Features
+
+* add starting/max rates to BulkWriterOptions ([#1305](https://www.github.com/googleapis/nodejs-firestore/issues/1305)) ([57dcf1c](https://www.github.com/googleapis/nodejs-firestore/commit/57dcf1c42b406a15ecb960059d67d99a97d42547))
+
+## [4.3.0](https://www.github.com/googleapis/nodejs-firestore/compare/v4.2.0...v4.3.0) (2020-09-22)
+
+### Features
+
+* add support for != and not-in queries ([#1292](https://www.github.com/googleapis/nodejs-firestore/issues/1292)) ([786e52f](https://www.github.com/googleapis/nodejs-firestore/commit/786e52f8c8b7b9c6b84ffc988190470a063d5855))
+
+### Bug Fixes
+
+* add capacity logging to RateLimiter ([#1287](https://www.github.com/googleapis/nodejs-firestore/issues/1287)) ([befe625](https://www.github.com/googleapis/nodejs-firestore/commit/befe625f35b7c96e9a90399a1ca71a8a049224ad))
+* allow `setLogFunction(null)` ([#1304](https://www.github.com/googleapis/nodejs-firestore/issues/1304)) ([20b1226](https://www.github.com/googleapis/nodejs-firestore/commit/20b122695843bffc106f73c92e112144f0b96070))
+* bulkWriter: writing to the same document does not create a new batch ([#1298](https://www.github.com/googleapis/nodejs-firestore/issues/1298)) ([6243d62](https://www.github.com/googleapis/nodejs-firestore/commit/6243d625481e8f9a852b4a3bf8d77ca9cbca4dd3))
+* change typings for select() to return `Query<DocumentData>` ([#1303](https://www.github.com/googleapis/nodejs-firestore/issues/1303)) ([b678857](https://www.github.com/googleapis/nodejs-firestore/commit/b678857afcdf14be5d645d7552e5f4aa4183b037))
+* correct BulkWriter types in firestore.d.ts ([#1284](https://www.github.com/googleapis/nodejs-firestore/issues/1284)) ([382128b](https://www.github.com/googleapis/nodejs-firestore/commit/382128b83de01cc0f88110393a1271b8d768509e))
+
 ## [4.2.0](https://www.github.com/googleapis/nodejs-firestore/compare/v4.1.2...v4.2.0) (2020-07-31)
@@ -9,0 +33,0 @@
 {
   "name": "@google-cloud/firestore",
   "description": "Firestore Client Library for Node.js",
-  "version": "4.2.0",
+  "version": "4.4.0",
   "license": "Apache-2.0",
@@ -85,3 +85,3 @@ "author": "Google Inc.",
     "sinon": "^9.0.2",
-    "ts-node": "^8.5.4",
+    "ts-node": "^9.0.0",
     "typescript": "3.8.3",
@@ -88,0 +88,0 @@ "through2": "^4.0.0",
Sorry, the diffs of the remaining files are too big to display or are not supported yet.