
@google-cloud/bigquery-storage


npm package version comparison

Comparing version 2.7.0 to 2.8.0

@@ -5,8 +5,11 @@ import * as v1 from './v1';

declare type BigQueryReadClient = v1.BigQueryReadClient;
+declare const BigQueryWriteClient: typeof v1.BigQueryWriteClient;
+declare type BigQueryWriteClient = v1.BigQueryWriteClient;
declare const BigQueryStorageClient: typeof v1beta1.BigQueryStorageClient;
declare type BigQueryStorageClient = v1beta1.BigQueryStorageClient;
-export { v1, BigQueryReadClient, v1beta1, BigQueryStorageClient };
+export { v1, BigQueryReadClient, v1beta1, BigQueryStorageClient, BigQueryWriteClient, };
declare const _default: {
v1: typeof v1;
BigQueryReadClient: typeof v1.BigQueryReadClient;
+BigQueryWriteClient: typeof v1.BigQueryWriteClient;
};

@@ -13,0 +16,0 @@ export default _default;

@@ -20,3 +20,3 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
-exports.protos = exports.BigQueryStorageClient = exports.v1beta1 = exports.BigQueryReadClient = exports.v1 = void 0;
+exports.protos = exports.BigQueryWriteClient = exports.BigQueryStorageClient = exports.v1beta1 = exports.BigQueryReadClient = exports.v1 = void 0;
const v1 = require("./v1");

@@ -28,2 +28,4 @@ exports.v1 = v1;

exports.BigQueryReadClient = BigQueryReadClient;
+const BigQueryWriteClient = v1.BigQueryWriteClient;
+exports.BigQueryWriteClient = BigQueryWriteClient;
const BigQueryStorageClient = v1beta1.BigQueryStorageClient;

@@ -33,5 +35,5 @@ exports.BigQueryStorageClient = BigQueryStorageClient;

// tslint:disable-next-line no-default-export
-exports.default = { v1, BigQueryReadClient };
+exports.default = { v1, BigQueryReadClient, BigQueryWriteClient };
const protos = require("../protos/protos");
exports.protos = protos;
//# sourceMappingURL=index.js.map
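With this release the package root also exports `BigQueryWriteClient`, as the `index.js` changes above show. A quick import sketch (the client list is taken from the new export statement):

```js
// All clients are importable from the package root as of 2.8.0.
const {
  BigQueryReadClient,   // v1 read API
  BigQueryWriteClient,  // v1 write API, newly re-exported here
  v1beta1,              // legacy surface, e.g. new v1beta1.BigQueryStorageClient()
} = require('@google-cloud/bigquery-storage');
```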

@@ -37,3 +37,4 @@ {

"SplitReadStream": {
"retry_codes_name": "non_idempotent",
"timeout_millis": 600000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"

@@ -40,0 +41,0 @@ }

@@ -104,5 +104,88 @@ import * as gax from 'google-gax';

getProjectId(callback: Callback<string, undefined, undefined>): void;
/**
* Creates a new read session. A read session divides the contents of a
* BigQuery table into one or more streams, which can then be used to read
* data from the table. The read session also specifies properties of the
* data to be read, such as a list of columns or a push-down filter describing
* the rows to be returned.
*
* A particular row can be read by at most one stream. When the caller has
* reached the end of each stream in the session, then all the data in the
* table has been read.
*
* Data is assigned to each stream such that roughly the same number of
* rows can be read from each stream. Because the server-side unit for
* assigning data is collections of rows, the API does not guarantee that
* each stream will return the same number of rows. Additionally, the
* limits are enforced based on the number of pre-filtered rows, so some
* filters can lead to lopsided assignments.
*
* Read sessions automatically expire 6 hours after they are created and do
* not require manual clean-up by the caller.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The request project that owns the session, in the form of
* `projects/{project_id}`.
* @param {google.cloud.bigquery.storage.v1.ReadSession} request.readSession
* Required. Session to be created.
* @param {number} request.maxStreamCount
* Max initial number of streams. If unset or zero, the server will
* choose a number of streams so as to produce reasonable throughput. Must be
* non-negative. The number of streams may be lower than the requested number,
* depending on the amount of parallelism that is reasonable for the table. An
* error will be returned if the max count is greater than the current system
* max limit of 1,000.
*
* Streams must be read starting from offset 0.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [ReadSession]{@link google.cloud.bigquery.storage.v1.ReadSession}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1/big_query_read.create_read_session.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async
*/
createReadSession(request?: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1.IReadSession, (protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest | undefined), {} | undefined]>;
createReadSession(request: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1.IReadSession, protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest | null | undefined, {} | null | undefined>): void;
createReadSession(request: protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest, callback: Callback<protos.google.cloud.bigquery.storage.v1.IReadSession, protos.google.cloud.bigquery.storage.v1.ICreateReadSessionRequest | null | undefined, {} | null | undefined>): void;
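A minimal usage sketch for `createReadSession`, assuming a hypothetical project `my-project` and table `my_dataset.my_table` (all identifiers are placeholders, not part of the API):

```js
const {BigQueryReadClient} = require('@google-cloud/bigquery-storage');

async function createSession() {
  const client = new BigQueryReadClient();
  const [session] = await client.createReadSession({
    // Project that owns the session, in `projects/{project_id}` form.
    parent: 'projects/my-project',
    readSession: {
      table: 'projects/my-project/datasets/my_dataset/tables/my_table',
      dataFormat: 'AVRO',
    },
    // 0 lets the server choose; an explicit cap must not exceed 1,000.
    maxStreamCount: 1,
  });
  console.log(`session ${session.name} has ${session.streams.length} stream(s)`);
  return session;
}
```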
/**
* Splits a given `ReadStream` into two `ReadStream` objects. These
* `ReadStream` objects are referred to as the primary and the residual
* streams of the split. The original `ReadStream` can still be read from in
* the same manner as before. Both of the returned `ReadStream` objects can
* also be read from, and the rows returned by both child streams will be
* the same as the rows read from the original stream.
*
* Moreover, the two child streams will be allocated back-to-back in the
* original `ReadStream`. Concretely, it is guaranteed that for streams
* original, primary, and residual, original[0-j] = primary[0-j] and
* original[j-n] = residual[0-m] once the streams have been read to
* completion.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. Name of the stream to split.
* @param {number} request.fraction
* A value in the range (0.0, 1.0) that specifies the fractional point at
* which the original stream should be split. The actual split point is
* evaluated on pre-filtered rows, so if a filter is provided, then there is
* no guarantee that the division of the rows between the new child streams
* will be proportional to this fractional value. Additionally, because the
* server-side unit for assigning data is collections of rows, this fraction
* will always map to a data storage boundary on the server side.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [SplitReadStreamResponse]{@link google.cloud.bigquery.storage.v1.SplitReadStreamResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1/big_query_read.split_read_stream.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async
*/
splitReadStream(request?: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, (protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest | undefined), {} | undefined]>;
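Continuing the sketch above inside an async context, a hedged example of splitting one of the session's streams (field names follow `SplitReadStreamResponse`):

```js
// Split an existing read stream roughly in half; the actual split point
// snaps to a server-side data storage boundary.
const [split] = await client.splitReadStream({
  name: session.streams[0].name,
  fraction: 0.5,
});
// primaryStream keeps the leading rows; remainderStream gets the rest.
console.log(split.primaryStream.name, split.remainderStream.name);
```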

@@ -135,6 +218,4 @@ splitReadStream(request: protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1.ISplitReadStreamResponse, protos.google.cloud.bigquery.storage.v1.ISplitReadStreamRequest | null | undefined, {} | null | undefined>): void;

* for more details and examples.
* @example
* const stream = client.readRows(request);
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* @example <caption>include:samples/generated/v1/big_query_read.read_rows.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryRead_ReadRows_async
*/

@@ -141,0 +222,0 @@ readRows(request?: protos.google.cloud.bigquery.storage.v1.IReadRowsRequest, options?: CallOptions): gax.CancellableStream;
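Expanding the inline `readRows` example into a fuller sketch of the server-streaming read path (the stream name again comes from the session created earlier):

```js
const stream = client.readRows({
  readStream: session.streams[0].name,
  offset: 0, // streams must be read starting from offset 0
});
stream.on('data', response => {
  // Rows arrive serialized in the session's format, e.g. response.avroRows.
  console.log(`received ${response.rowCount} rows`);
});
stream.on('error', console.error);
stream.on('end', () => console.log('stream drained'));
```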

@@ -243,49 +243,2 @@ "use strict";

}
/**
* Creates a new read session. A read session divides the contents of a
* BigQuery table into one or more streams, which can then be used to read
* data from the table. The read session also specifies properties of the
* data to be read, such as a list of columns or a push-down filter describing
* the rows to be returned.
*
* A particular row can be read by at most one stream. When the caller has
* reached the end of each stream in the session, then all the data in the
* table has been read.
*
* Data is assigned to each stream such that roughly the same number of
* rows can be read from each stream. Because the server-side unit for
* assigning data is collections of rows, the API does not guarantee that
* each stream will return the same number of rows. Additionally, the
* limits are enforced based on the number of pre-filtered rows, so some
* filters can lead to lopsided assignments.
*
* Read sessions automatically expire 6 hours after they are created and do
* not require manual clean-up by the caller.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The request project that owns the session, in the form of
* `projects/{project_id}`.
* @param {google.cloud.bigquery.storage.v1.ReadSession} request.readSession
* Required. Session to be created.
* @param {number} request.maxStreamCount
* Max initial number of streams. If unset or zero, the server will
* choose a number of streams so as to produce reasonable throughput. Must be
* non-negative. The number of streams may be lower than the requested number,
* depending on the amount of parallelism that is reasonable for the table. An
* error will be returned if the max count is greater than the current system
* max limit of 1,000.
*
* Streams must be read starting from offset 0.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [ReadSession]{@link google.cloud.bigquery.storage.v1.ReadSession}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.createReadSession(request);
*/
createReadSession(request, optionsOrCallback, callback) {

@@ -311,38 +264,2 @@ request = request || {};

}
/**
* Splits a given `ReadStream` into two `ReadStream` objects. These
* `ReadStream` objects are referred to as the primary and the residual
* streams of the split. The original `ReadStream` can still be read from in
* the same manner as before. Both of the returned `ReadStream` objects can
* also be read from, and the rows returned by both child streams will be
* the same as the rows read from the original stream.
*
* Moreover, the two child streams will be allocated back-to-back in the
* original `ReadStream`. Concretely, it is guaranteed that for streams
* original, primary, and residual, original[0-j] = primary[0-j] and
* original[j-n] = residual[0-m] once the streams have been read to
* completion.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. Name of the stream to split.
* @param {number} request.fraction
* A value in the range (0.0, 1.0) that specifies the fractional point at
* which the original stream should be split. The actual split point is
* evaluated on pre-filtered rows, so if a filter is provided, then there is
* no guarantee that the division of the rows between the new child streams
* will be proportional to this fractional value. Additionally, because the
* server-side unit for assigning data is collections of rows, this fraction
* will always map to a data storage boundary on the server side.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [SplitReadStreamResponse]{@link google.cloud.bigquery.storage.v1.SplitReadStreamResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.splitReadStream(request);
*/
splitReadStream(request, optionsOrCallback, callback) {

@@ -392,6 +309,4 @@ request = request || {};

* for more details and examples.
* @example
* const stream = client.readRows(request);
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* @example <caption>include:samples/generated/v1/big_query_read.read_rows.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryRead_ReadRows_async
*/

@@ -398,0 +313,0 @@ readRows(request, options) {

@@ -107,14 +107,128 @@ import * as gax from 'google-gax';

getProjectId(callback: Callback<string, undefined, undefined>): void;
/**
* Creates a write stream to the given table.
* Additionally, every table has a special stream named '_default'
* to which data can be written. This stream doesn't need to be created using
* CreateWriteStream. It is a stream that can be used simultaneously by any
* number of clients. Data written to this stream is considered committed as
* soon as an acknowledgement is received.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. Reference to the table to which the stream belongs, in the format
* of `projects/{project}/datasets/{dataset}/tables/{table}`.
* @param {google.cloud.bigquery.storage.v1.WriteStream} request.writeStream
* Required. Stream to be created.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [WriteStream]{@link google.cloud.bigquery.storage.v1.WriteStream}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1/big_query_write.create_write_stream.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async
*/
createWriteStream(request?: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1.IWriteStream, (protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest | undefined), {} | undefined]>;
createWriteStream(request: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1.IWriteStream, protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest | null | undefined, {} | null | undefined>): void;
createWriteStream(request: protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest, callback: Callback<protos.google.cloud.bigquery.storage.v1.IWriteStream, protos.google.cloud.bigquery.storage.v1.ICreateWriteStreamRequest | null | undefined, {} | null | undefined>): void;
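A minimal sketch for `createWriteStream` using the `BigQueryWriteClient` this release exports; the `PENDING` stream type and identifiers are illustrative assumptions:

```js
const {BigQueryWriteClient} = require('@google-cloud/bigquery-storage');

const writeClient = new BigQueryWriteClient();
const [writeStream] = await writeClient.createWriteStream({
  parent: 'projects/my-project/datasets/my_dataset/tables/my_table',
  // Rows written to a PENDING stream stay invisible until the stream is
  // finalized and committed (see batchCommitWriteStreams below).
  writeStream: {type: 'PENDING'},
});
console.log(`created write stream ${writeStream.name}`);
```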
/**
* Gets information about a write stream.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. Name of the stream to get, in the form of
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [WriteStream]{@link google.cloud.bigquery.storage.v1.WriteStream}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1/big_query_write.get_write_stream.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async
*/
getWriteStream(request?: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1.IWriteStream, (protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest | undefined), {} | undefined]>;
getWriteStream(request: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1.IWriteStream, protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest | null | undefined, {} | null | undefined>): void;
getWriteStream(request: protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest, callback: Callback<protos.google.cloud.bigquery.storage.v1.IWriteStream, protos.google.cloud.bigquery.storage.v1.IGetWriteStreamRequest | null | undefined, {} | null | undefined>): void;
/**
* Finalize a write stream so that no new data can be appended to the
* stream. Finalize is not supported on the '_default' stream.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. Name of the stream to finalize, in the form of
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [FinalizeWriteStreamResponse]{@link google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1/big_query_write.finalize_write_stream.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async
*/
finalizeWriteStream(request?: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, (protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest | undefined), {} | undefined]>;
finalizeWriteStream(request: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest | null | undefined, {} | null | undefined>): void;
finalizeWriteStream(request: protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest, callback: Callback<protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamResponse, protos.google.cloud.bigquery.storage.v1.IFinalizeWriteStreamRequest | null | undefined, {} | null | undefined>): void;
/**
* Atomically commits a group of `PENDING` streams that belong to the same
* `parent` table.
*
* Streams must be finalized before commit and cannot be committed multiple
* times. Once a stream is committed, data in the stream becomes available
* for read operations.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. Parent table that all the streams should belong to, in the form of
* `projects/{project}/datasets/{dataset}/tables/{table}`.
* @param {string[]} request.writeStreams
* Required. The group of streams that will be committed atomically.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [BatchCommitWriteStreamsResponse]{@link google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1/big_query_write.batch_commit_write_streams.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async
*/
batchCommitWriteStreams(request?: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse, (protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest | undefined), {} | undefined]>;
batchCommitWriteStreams(request: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse, protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest | null | undefined, {} | null | undefined>): void;
batchCommitWriteStreams(request: protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest, callback: Callback<protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsResponse, protos.google.cloud.bigquery.storage.v1.IBatchCommitWriteStreamsRequest | null | undefined, {} | null | undefined>): void;
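Putting `finalizeWriteStream` and `batchCommitWriteStreams` together, a sketch of the two-phase commit flow for a `PENDING` stream (names carried over from the write-stream sketch above):

```js
// 1. Finalize: the stream accepts no further appends.
const [finalized] = await writeClient.finalizeWriteStream({
  name: writeStream.name,
});
console.log(`finalized with ${finalized.rowCount} rows`);

// 2. Commit atomically: the stream's rows become readable.
const [commit] = await writeClient.batchCommitWriteStreams({
  parent: 'projects/my-project/datasets/my_dataset/tables/my_table',
  writeStreams: [writeStream.name],
});
// streamErrors is empty on success; commitTime is a protobuf Timestamp.
console.log(commit.commitTime, commit.streamErrors);
```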
/**
* Flushes rows to a BUFFERED stream.
*
* If users are appending rows to a BUFFERED stream, a flush operation is
* required in order for the rows to become available for reading. A
* flush operation flushes a BUFFERED stream from any previously flushed
* offset up to the offset specified in the request.
*
* Flush is not supported on the _default stream, since it is not BUFFERED.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.writeStream
* Required. The stream that is the target of the flush operation.
* @param {google.protobuf.Int64Value} request.offset
* Ending offset of the flush operation. Rows before this offset (including
* this offset) will be flushed.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [FlushRowsResponse]{@link google.cloud.bigquery.storage.v1.FlushRowsResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1/big_query_write.flush_rows.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async
*/
flushRows(request?: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse, protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest | undefined, {} | undefined]>;
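A sketch of `flushRows` against a hypothetical stream created with type `BUFFERED`; the offset wrapper shape follows the `google.protobuf.Int64Value` documentation above:

```js
// Make rows 0..9 of a BUFFERED stream available for reading.
const [flushed] = await writeClient.flushRows({
  writeStream: bufferedStream.name, // assumed: created with type 'BUFFERED'
  offset: {value: 9},               // google.protobuf.Int64Value wrapper
});
console.log('flushed through offset', flushed.offset);
```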

@@ -165,8 +279,4 @@ flushRows(request: protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1.IFlushRowsResponse, protos.google.cloud.bigquery.storage.v1.IFlushRowsRequest | null | undefined, {} | null | undefined>): void;

* for more details and examples.
* @example
* const stream = client.appendRows();
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* stream.write(request);
* stream.end();
* @example <caption>include:samples/generated/v1/big_query_write.append_rows.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async
*/

@@ -173,0 +283,0 @@ appendRows(options?: CallOptions): gax.CancellableStream;
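A hedged sketch of the bidirectional `appendRows` stream, fleshing out the inline example; `protoDescriptor` (a `DescriptorProto` for the row message) and `serializedRows` (an array of encoded protobuf rows) are assumed to be prepared elsewhere:

```js
const appendStream = writeClient.appendRows();
appendStream.on('data', response => {
  // Each response acknowledges one request; appendResult.offset is the
  // offset at which the rows landed (absent on the '_default' stream).
  console.log('append result:', response.appendResult);
});
appendStream.on('error', console.error);
appendStream.on('end', () => console.log('append stream closed'));
appendStream.write({
  writeStream: writeStream.name,
  protoRows: {
    writerSchema: {protoDescriptor}, // assumed DescriptorProto
    rows: {serializedRows},          // assumed Buffer[] of encoded rows
  },
});
appendStream.end();
```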

@@ -249,27 +249,2 @@ "use strict";

}
/**
* Creates a write stream to the given table.
* Additionally, every table has a special stream named '_default'
* to which data can be written. This stream doesn't need to be created using
* CreateWriteStream. It is a stream that can be used simultaneously by any
* number of clients. Data written to this stream is considered committed as
* soon as an acknowledgement is received.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. Reference to the table to which the stream belongs, in the format
* of `projects/{project}/datasets/{dataset}/tables/{table}`.
* @param {google.cloud.bigquery.storage.v1.WriteStream} request.writeStream
* Required. Stream to be created.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [WriteStream]{@link google.cloud.bigquery.storage.v1.WriteStream}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.createWriteStream(request);
*/
createWriteStream(request, optionsOrCallback, callback) {

@@ -295,20 +270,2 @@ request = request || {};

}
/**
* Gets information about a write stream.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. Name of the stream to get, in the form of
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [WriteStream]{@link google.cloud.bigquery.storage.v1.WriteStream}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.getWriteStream(request);
*/
getWriteStream(request, optionsOrCallback, callback) {

@@ -334,21 +291,2 @@ request = request || {};

}
/**
* Finalize a write stream so that no new data can be appended to the
* stream. Finalize is not supported on the '_default' stream.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. Name of the stream to finalize, in the form of
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [FinalizeWriteStreamResponse]{@link google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.finalizeWriteStream(request);
*/
finalizeWriteStream(request, optionsOrCallback, callback) {

@@ -374,27 +312,2 @@ request = request || {};

}
/**
* Atomically commits a group of `PENDING` streams that belong to the same
* `parent` table.
*
* Streams must be finalized before commit and cannot be committed multiple
* times. Once a stream is committed, data in the stream becomes available
* for read operations.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. Parent table that all the streams should belong to, in the form of
* `projects/{project}/datasets/{dataset}/tables/{table}`.
* @param {string[]} request.writeStreams
* Required. The group of streams that will be committed atomically.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [BatchCommitWriteStreamsResponse]{@link google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.batchCommitWriteStreams(request);
*/
batchCommitWriteStreams(request, optionsOrCallback, callback) {

@@ -420,29 +333,2 @@ request = request || {};

}
/**
* Flushes rows to a BUFFERED stream.
*
* If users are appending rows to a BUFFERED stream, a flush operation is
* required in order for the rows to become available for reading. A
* flush operation flushes a BUFFERED stream from any previously flushed
* offset up to the offset specified in the request.
*
* Flush is not supported on the _default stream, since it is not BUFFERED.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.writeStream
* Required. The stream that is the target of the flush operation.
* @param {google.protobuf.Int64Value} request.offset
* Ending offset of the flush operation. Rows before this offset (including
* this offset) will be flushed.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [FlushRowsResponse]{@link google.cloud.bigquery.storage.v1.FlushRowsResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.flushRows(request);
*/
flushRows(request, optionsOrCallback, callback) {

@@ -510,8 +396,4 @@ request = request || {};

* for more details and examples.
* @example
* const stream = client.appendRows();
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* stream.write(request);
* stream.end();
* @example <caption>include:samples/generated/v1/big_query_write.append_rows.js</caption>
* region_tag:bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async
*/

@@ -518,0 +400,0 @@ appendRows(options) {

@@ -104,11 +104,151 @@ import * as gax from 'google-gax';

getProjectId(callback: Callback<string, undefined, undefined>): void;
/**
* Creates a new read session. A read session divides the contents of a
* BigQuery table into one or more streams, which can then be used to read
* data from the table. The read session also specifies properties of the
* data to be read, such as a list of columns or a push-down filter describing
* the rows to be returned.
*
* A particular row can be read by at most one stream. When the caller has
* reached the end of each stream in the session, then all the data in the
* table has been read.
*
* Read sessions automatically expire 24 hours after they are created and do
* not require manual clean-up by the caller.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.bigquery.storage.v1beta1.TableReference} request.tableReference
* Required. Reference to the table to read.
* @param {string} request.parent
* Required. String of the form `projects/{project_id}` indicating the
* project this ReadSession is associated with. This is the project that will
* be billed for usage.
* @param {google.cloud.bigquery.storage.v1beta1.TableModifiers} request.tableModifiers
* Any modifiers to the Table (e.g. snapshot timestamp).
* @param {number} request.requestedStreams
* Initial number of streams. If unset or 0, we will
* choose a number of streams so as to produce reasonable throughput. Must be
* non-negative. The number of streams may be lower than the requested number,
* depending on the amount of parallelism that is reasonable for the table and
* the maximum amount of parallelism allowed by the system.
*
* Streams must be read starting from offset 0.
* @param {google.cloud.bigquery.storage.v1beta1.TableReadOptions} request.readOptions
* Read options for this session (e.g. column selection, filters).
* @param {google.cloud.bigquery.storage.v1beta1.DataFormat} request.format
* Data output format. Currently defaults to Avro.
* @param {google.cloud.bigquery.storage.v1beta1.ShardingStrategy} request.shardingStrategy
* The strategy to use for distributing data among multiple streams. Currently
* defaults to liquid sharding.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [ReadSession]{@link google.cloud.bigquery.storage.v1beta1.ReadSession}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1beta1/big_query_storage.create_read_session.js</caption>
* region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_CreateReadSession_async
*/
createReadSession(request?: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1beta1.IReadSession, (protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest | undefined), {} | undefined]>;
createReadSession(request: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1beta1.IReadSession, protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest | null | undefined, {} | null | undefined>): void;
createReadSession(request: protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest, callback: Callback<protos.google.cloud.bigquery.storage.v1beta1.IReadSession, protos.google.cloud.bigquery.storage.v1beta1.ICreateReadSessionRequest | null | undefined, {} | null | undefined>): void;
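For the legacy v1beta1 surface, a sketch of session creation with `BigQueryStorageClient`; note the structured `tableReference` instead of the v1 path string (identifiers remain placeholders):

```js
const {v1beta1} = require('@google-cloud/bigquery-storage');

const storageClient = new v1beta1.BigQueryStorageClient();
const [betaSession] = await storageClient.createReadSession({
  tableReference: {
    projectId: 'my-project',
    datasetId: 'my_dataset',
    tableId: 'my_table',
  },
  parent: 'projects/my-project', // project billed for usage
  requestedStreams: 1,
});
```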
/**
* Creates additional streams for a ReadSession. This API can be used to
* dynamically adjust the parallelism of a batch processing task upwards by
* adding additional workers.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.bigquery.storage.v1beta1.ReadSession} request.session
* Required. Must be a non-expired session obtained from a call to
* CreateReadSession. Only the name field needs to be set.
* @param {number} request.requestedStreams
* Required. Number of new streams requested. Must be positive.
* The number of added streams may be less than this; see CreateReadSessionRequest
* for more information.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [BatchCreateReadSessionStreamsResponse]{@link google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1beta1/big_query_storage.batch_create_read_session_streams.js</caption>
* region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_BatchCreateReadSessionStreams_async
*/
batchCreateReadSessionStreams(request?: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, (protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest | undefined), {} | undefined]>;
batchCreateReadSessionStreams(request: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest | null | undefined, {} | null | undefined>): void;
batchCreateReadSessionStreams(request: protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest, callback: Callback<protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsResponse, protos.google.cloud.bigquery.storage.v1beta1.IBatchCreateReadSessionStreamsRequest | null | undefined, {} | null | undefined>): void;
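A sketch of scaling a v1beta1 session up; per the docs above, only the session's `name` needs to be set:

```js
const [added] = await storageClient.batchCreateReadSessionStreams({
  session: {name: betaSession.name}, // non-expired session from createReadSession
  requestedStreams: 2,               // the server may grant fewer
});
console.log(`added ${added.streams.length} stream(s)`);
```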
/**
* Triggers the graceful termination of a single stream in a ReadSession. This
* API can be used to dynamically adjust the parallelism of a batch processing
* task downwards without losing data.
*
* This API does not delete the stream -- it remains visible in the
* ReadSession, and any data processed by the stream is not released to other
* streams. However, no additional data will be assigned to the stream once
* this call completes. Callers must continue reading data on the stream until
* the end of the stream is reached so that data which has already been
* assigned to the stream will be processed.
*
* This method will return an error if there are no other live streams
* in the Session, or if SplitReadStream() has been called on the given
* Stream.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.bigquery.storage.v1beta1.Stream} request.stream
* Required. Stream to finalize.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1beta1/big_query_storage.finalize_stream.js</caption>
* region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_FinalizeStream_async
*/
finalizeStream(request?: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, options?: CallOptions): Promise<[protos.google.protobuf.IEmpty, (protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest | undefined), {} | undefined]>;
finalizeStream(request: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, options: CallOptions, callback: Callback<protos.google.protobuf.IEmpty, protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest | null | undefined, {} | null | undefined>): void;
finalizeStream(request: protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest, callback: Callback<protos.google.protobuf.IEmpty, protos.google.cloud.bigquery.storage.v1beta1.IFinalizeStreamRequest | null | undefined, {} | null | undefined>): void;
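And the matching scale-down sketch with `finalizeStream`; the caller still drains the stream afterwards so rows already assigned to it are processed:

```js
// Gracefully retire one stream; no new data will be assigned to it.
await storageClient.finalizeStream({
  stream: {name: betaSession.streams[0].name}, // placeholder stream
});
// Keep reading this stream to its end before discarding the worker.
```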
/**
* Splits a given read stream into two Streams. These streams are referred to
* as the primary and the residual of the split. The original stream can still
* be read from in the same manner as before. Both of the returned streams can
* also be read from, and the total rows returned by both child streams will be
* the same as the rows read from the original stream.
*
* Moreover, the two child streams will be allocated back to back in the
* original Stream. Concretely, it is guaranteed that for streams Original,
* Primary, and Residual, Original[0-j] = Primary[0-j] and
* Original[j-n] = Residual[0-m] once the streams have been read to
* completion.
*
* This method is guaranteed to be idempotent.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.bigquery.storage.v1beta1.Stream} request.originalStream
* Required. Stream to split.
* @param {number} request.fraction
* A value in the range (0.0, 1.0) that specifies the fractional point at
* which the original stream should be split. The actual split point is
* evaluated on pre-filtered rows, so if a filter is provided, then there is
* no guarantee that the division of the rows between the new child streams
* will be proportional to this fractional value. Additionally, because the
* server-side unit for assigning data is collections of rows, this fraction
* will always map to a data storage boundary on the server side.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [SplitReadStreamResponse]{@link google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1beta1/big_query_storage.split_read_stream.js</caption>
* region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_SplitReadStream_async
*/
splitReadStream(request?: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, options?: CallOptions): Promise<[protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, (protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest | undefined), {} | undefined]>;
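Finally, a sketch of the v1beta1 split, which takes a stream object rather than a name string:

```js
const [betaSplit] = await storageClient.splitReadStream({
  originalStream: {name: betaSession.streams[0].name},
  fraction: 0.3, // requested split point; snaps to a storage boundary
});
// primaryStream covers roughly the first 30%; remainderStream the rest.
console.log(betaSplit.primaryStream.name, betaSplit.remainderStream.name);
```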

@@ -141,6 +281,4 @@ splitReadStream(request: protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest, options: CallOptions, callback: Callback<protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamResponse, protos.google.cloud.bigquery.storage.v1beta1.ISplitReadStreamRequest | null | undefined, {} | null | undefined>): void;

* for more details and examples.
* @example
* const stream = client.readRows(request);
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* @example <caption>include:samples/generated/v1beta1/big_query_storage.read_rows.js</caption>
* region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async
*/

@@ -147,0 +285,0 @@ readRows(request?: protos.google.cloud.bigquery.storage.v1beta1.IReadRowsRequest, options?: CallOptions): gax.CancellableStream;

@@ -244,51 +244,2 @@ "use strict";

}
/**
* Creates a new read session. A read session divides the contents of a
* BigQuery table into one or more streams, which can then be used to read
* data from the table. The read session also specifies properties of the
* data to be read, such as a list of columns or a push-down filter describing
* the rows to be returned.
*
* A particular row can be read by at most one stream. When the caller has
* reached the end of each stream in the session, then all the data in the
* table has been read.
*
* Read sessions automatically expire 24 hours after they are created and do
* not require manual clean-up by the caller.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.bigquery.storage.v1beta1.TableReference} request.tableReference
* Required. Reference to the table to read.
* @param {string} request.parent
* Required. String of the form `projects/{project_id}` indicating the
* project this ReadSession is associated with. This is the project that will
* be billed for usage.
* @param {google.cloud.bigquery.storage.v1beta1.TableModifiers} request.tableModifiers
* Any modifiers to the Table (e.g. snapshot timestamp).
* @param {number} request.requestedStreams
* Initial number of streams. If unset or 0, we will
* choose a number of streams so as to produce reasonable throughput. Must be
* non-negative. The number of streams may be lower than the requested number,
* depending on the amount of parallelism that is reasonable for the table and
* the maximum amount of parallelism allowed by the system.
*
* Streams must be read starting from offset 0.
* @param {google.cloud.bigquery.storage.v1beta1.TableReadOptions} request.readOptions
* Read options for this session (e.g. column selection, filters).
* @param {google.cloud.bigquery.storage.v1beta1.DataFormat} request.format
* Data output format. Currently defaults to Avro.
* @param {google.cloud.bigquery.storage.v1beta1.ShardingStrategy} request.shardingStrategy
* The strategy to use for distributing data among multiple streams. Currently
* defaults to liquid sharding.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [ReadSession]{@link google.cloud.bigquery.storage.v1beta1.ReadSession}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.createReadSession(request);
*/
createReadSession(request, optionsOrCallback, callback) {

@@ -315,26 +266,2 @@ request = request || {};

}
/**
* Creates additional streams for a ReadSession. This API can be used to
* dynamically adjust the parallelism of a batch processing task upwards by
* adding additional workers.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.bigquery.storage.v1beta1.ReadSession} request.session
* Required. Must be a non-expired session obtained from a call to
* CreateReadSession. Only the name field needs to be set.
* @param {number} request.requestedStreams
* Required. Number of new streams requested. Must be positive.
* The number of added streams may be less than this; see CreateReadSessionRequest
* for more information.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [BatchCreateReadSessionStreamsResponse]{@link google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.batchCreateReadSessionStreams(request);
*/
batchCreateReadSessionStreams(request, optionsOrCallback, callback) {

@@ -360,32 +287,2 @@ request = request || {};

}
/**
* Triggers the graceful termination of a single stream in a ReadSession. This
* API can be used to dynamically adjust the parallelism of a batch processing
* task downwards without losing data.
*
* This API does not delete the stream -- it remains visible in the
* ReadSession, and any data processed by the stream is not released to other
* streams. However, no additional data will be assigned to the stream once
* this call completes. Callers must continue reading data on the stream until
* the end of the stream is reached so that data which has already been
* assigned to the stream will be processed.
*
* This method will return an error if there are no other live streams
* in the Session, or if SplitReadStream() has been called on the given
* Stream.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.bigquery.storage.v1beta1.Stream} request.stream
* Required. Stream to finalize.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.finalizeStream(request);
*/
finalizeStream(request, optionsOrCallback, callback) {

@@ -411,39 +308,2 @@ request = request || {};

}
/**
* Splits a given read stream into two Streams. These streams are referred to
* as the primary and the residual of the split. The original stream can still
* be read from in the same manner as before. Both of the returned streams can
* also be read from, and the total rows returned by both child streams will be
* the same as the rows read from the original stream.
*
* Moreover, the two child streams will be allocated back to back in the
* original Stream. Concretely, it is guaranteed that for streams Original,
* Primary, and Residual, Original[0-j] = Primary[0-j] and
* Original[j-n] = Residual[0-m] once the streams have been read to
* completion.
*
* This method is guaranteed to be idempotent.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.bigquery.storage.v1beta1.Stream} request.originalStream
* Required. Stream to split.
* @param {number} request.fraction
* A value in the range (0.0, 1.0) that specifies the fractional point at
* which the original stream should be split. The actual split point is
* evaluated on pre-filtered rows, so if a filter is provided, then there is
* no guarantee that the division of the rows between the new child streams
* will be proportional to this fractional value. Additionally, because the
* server-side unit for assigning data is collections of rows, this fraction
* will always map to a data storage boundary on the server side.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [SplitReadStreamResponse]{@link google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.splitReadStream(request);
*/
splitReadStream(request, optionsOrCallback, callback) {

@@ -493,6 +353,4 @@ request = request || {};

* for more details and examples.
* @example
* const stream = client.readRows(request);
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* @example <caption>include:samples/generated/v1beta1/big_query_storage.read_rows.js</caption>
* region_tag:bigquerystorage_v1beta1_generated_BigQueryStorage_ReadRows_async
*/

@@ -499,0 +357,0 @@ readRows(request, options) {

# Changelog
## [2.8.0](https://www.github.com/googleapis/nodejs-bigquery-storage/compare/v2.7.0...v2.8.0) (2021-12-30)
### Features
* add write_mode support for BigQuery Storage Write API v1 ([#228](https://www.github.com/googleapis/nodejs-bigquery-storage/issues/228)) ([18f3123](https://www.github.com/googleapis/nodejs-bigquery-storage/commit/18f3123275716d49460f77cbbc1a4547412087d2))
## [2.7.0](https://www.github.com/googleapis/nodejs-bigquery-storage/compare/v2.6.2...v2.7.0) (2021-09-27)

@@ -4,0 +11,0 @@

{
"name": "@google-cloud/bigquery-storage",
"version": "2.7.0",
"version": "2.8.0",
"description": "Client for the BigQuery Storage API",

@@ -34,3 +34,3 @@ "repository": "googleapis/nodejs-bigquery-storage",

"@types/mocha": "^8.0.0",
"@types/node": "^14.0.0",
"@types/node": "^16.0.0",
"@types/sinon": "^10.0.0",

@@ -46,3 +46,3 @@ "c8": "^7.1.0",

"pack-n-play": "^1.0.0-2",
"sinon": "^11.0.0",
"sinon": "^12.0.0",
"ts-loader": "^9.0.0",

@@ -49,0 +49,0 @@ "typescript": "^3.8.3",

@@ -232,2 +232,4 @@ [//]: # "This README.md file is auto-generated, all changes to this file will be lost."

More Information: [Google Cloud Platform Launch Stages][launch_stages]

@@ -234,0 +236,0 @@
