Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in | Demo | Install
Socket

@google-cloud/speech

Package Overview
Dependencies
Maintainers
1
Versions
109
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@google-cloud/speech - npm Package Compare versions

Comparing version 4.9.0 to 4.10.0

build/protos/google/cloud/speech/v1/resource.proto

134

build/src/v1/speech_client.d.ts

@@ -23,2 +23,5 @@ import * as gax from 'google-gax';

};
pathTemplates: {
[name: string]: gax.PathTemplate;
};
operationsClient: gax.OperationsClient;

@@ -101,2 +104,23 @@ speechStub?: Promise<{

getProjectId(callback: Callback<string, undefined, undefined>): void;
/**
* Performs synchronous speech recognition: receive results after all audio
* has been sent and processed.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
* @param {google.cloud.speech.v1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [RecognizeResponse]{@link google.cloud.speech.v1.RecognizeResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1/speech.recognize.js</caption>
* region_tag:speech_v1_generated_Speech_Recognize_async
*/
recognize(request?: protos.google.cloud.speech.v1.IRecognizeRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1.IRecognizeResponse, protos.google.cloud.speech.v1.IRecognizeRequest | undefined, {} | undefined]>;

@@ -118,10 +142,35 @@ recognize(request: protos.google.cloud.speech.v1.IRecognizeRequest, options: CallOptions, callback: Callback<protos.google.cloud.speech.v1.IRecognizeResponse, protos.google.cloud.speech.v1.IRecognizeRequest | null | undefined, {} | null | undefined>): void;

* for more details and examples.
* @example
* const stream = client.streamingRecognize();
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* stream.write(request);
* stream.end();
* @example <caption>include:samples/generated/v1/speech.streaming_recognize.js</caption>
* region_tag:speech_v1_generated_Speech_StreamingRecognize_async
*/
_streamingRecognize(options?: CallOptions): gax.CancellableStream;
/**
* Performs asynchronous speech recognition: receive results via the
* google.longrunning.Operations interface. Returns either an
* `Operation.error` or an `Operation.response` which contains
* a `LongRunningRecognizeResponse` message.
* For more information on asynchronous speech recognition, see the
* [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
* @param {google.cloud.speech.v1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
* @param {google.cloud.speech.v1.TranscriptOutputConfig} [request.outputConfig]
* Optional. Specifies an optional destination for the recognition results.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing
* a long running operation. Its `promise()` method returns a promise
* you can `await` for.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations)
* for more details and examples.
* @example <caption>include:samples/generated/v1/speech.long_running_recognize.js</caption>
* region_tag:speech_v1_generated_Speech_LongRunningRecognize_async
*/
longRunningRecognize(request?: protos.google.cloud.speech.v1.ILongRunningRecognizeRequest, options?: CallOptions): Promise<[LROperation<protos.google.cloud.speech.v1.ILongRunningRecognizeResponse, protos.google.cloud.speech.v1.ILongRunningRecognizeMetadata>, protos.google.longrunning.IOperation | undefined, {} | undefined]>;

@@ -139,10 +188,73 @@ longRunningRecognize(request: protos.google.cloud.speech.v1.ILongRunningRecognizeRequest, options: CallOptions, callback: Callback<LROperation<protos.google.cloud.speech.v1.ILongRunningRecognizeResponse, protos.google.cloud.speech.v1.ILongRunningRecognizeMetadata>, protos.google.longrunning.IOperation | null | undefined, {} | null | undefined>): void;

* for more details and examples.
* @example
* const decodedOperation = await checkLongRunningRecognizeProgress(name);
* console.log(decodedOperation.result);
* console.log(decodedOperation.done);
* console.log(decodedOperation.metadata);
* @example <caption>include:samples/generated/v1/speech.long_running_recognize.js</caption>
* region_tag:speech_v1_generated_Speech_LongRunningRecognize_async
*/
checkLongRunningRecognizeProgress(name: string): Promise<LROperation<protos.google.cloud.speech.v1.LongRunningRecognizeResponse, protos.google.cloud.speech.v1.LongRunningRecognizeMetadata>>;
/**
* Return a fully-qualified customClass resource name string.
*
* @param {string} project
* @param {string} location
* @param {string} custom_class
* @returns {string} Resource name string.
*/
customClassPath(project: string, location: string, customClass: string): string;
/**
* Parse the project from CustomClass resource.
*
* @param {string} customClassName
* A fully-qualified path representing CustomClass resource.
* @returns {string} A string representing the project.
*/
matchProjectFromCustomClassName(customClassName: string): string | number;
/**
* Parse the location from CustomClass resource.
*
* @param {string} customClassName
* A fully-qualified path representing CustomClass resource.
* @returns {string} A string representing the location.
*/
matchLocationFromCustomClassName(customClassName: string): string | number;
/**
* Parse the custom_class from CustomClass resource.
*
* @param {string} customClassName
* A fully-qualified path representing CustomClass resource.
* @returns {string} A string representing the custom_class.
*/
matchCustomClassFromCustomClassName(customClassName: string): string | number;
/**
* Return a fully-qualified phraseSet resource name string.
*
* @param {string} project
* @param {string} location
* @param {string} phrase_set
* @returns {string} Resource name string.
*/
phraseSetPath(project: string, location: string, phraseSet: string): string;
/**
* Parse the project from PhraseSet resource.
*
* @param {string} phraseSetName
* A fully-qualified path representing PhraseSet resource.
* @returns {string} A string representing the project.
*/
matchProjectFromPhraseSetName(phraseSetName: string): string | number;
/**
* Parse the location from PhraseSet resource.
*
* @param {string} phraseSetName
* A fully-qualified path representing PhraseSet resource.
* @returns {string} A string representing the location.
*/
matchLocationFromPhraseSetName(phraseSetName: string): string | number;
/**
* Parse the phrase_set from PhraseSet resource.
*
* @param {string} phraseSetName
* A fully-qualified path representing PhraseSet resource.
* @returns {string} A string representing the phrase_set.
*/
matchPhraseSetFromPhraseSetName(phraseSetName: string): string | number;
/**
* Terminate the gRPC channel and close the client.

@@ -149,0 +261,0 @@ *

176

build/src/v1/speech_client.js

@@ -128,2 +128,9 @@ "use strict";

this._protos = this._gaxGrpc.loadProtoJSON(jsonProtos);
// This API contains "path templates"; forward-slash-separated
// identifiers to uniquely identify resources within the API.
// Create useful helper objects for these.
this.pathTemplates = {
customClassPathTemplate: new this._gaxModule.PathTemplate('projects/{project}/locations/{location}/customClasses/{custom_class}'),
phraseSetPathTemplate: new this._gaxModule.PathTemplate('projects/{project}/locations/{location}/phraseSets/{phrase_set}'),
};
// Some of the methods on this service provide streaming responses.

@@ -246,23 +253,2 @@ // Provide descriptors for these.

}
/**
* Performs synchronous speech recognition: receive results after all audio
* has been sent and processed.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
* @param {google.cloud.speech.v1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [RecognizeResponse]{@link google.cloud.speech.v1.RecognizeResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.recognize(request);
*/
recognize(request, optionsOrCallback, callback) {

@@ -279,2 +265,4 @@ request = request || {};

options = options || {};
options.otherArgs = options.otherArgs || {};
options.otherArgs.headers = options.otherArgs.headers || {};
this.initialize();

@@ -296,8 +284,4 @@ return this.innerApiCalls.recognize(request, options, callback);

* for more details and examples.
* @example
* const stream = client.streamingRecognize();
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* stream.write(request);
* stream.end();
* @example <caption>include:samples/generated/v1/speech.streaming_recognize.js</caption>
* region_tag:speech_v1_generated_Speech_StreamingRecognize_async
*/

@@ -308,32 +292,2 @@ _streamingRecognize(options) {

}
/**
* Performs asynchronous speech recognition: receive results via the
* google.longrunning.Operations interface. Returns either an
* `Operation.error` or an `Operation.response` which contains
* a `LongRunningRecognizeResponse` message.
* For more information on asynchronous speech recognition, see the
* [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
* @param {google.cloud.speech.v1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
* @param {google.cloud.speech.v1.TranscriptOutputConfig} [request.outputConfig]
* Optional. Specifies an optional destination for the recognition results.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing
* a long running operation. Its `promise()` method returns a promise
* you can `await` for.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations)
* for more details and examples.
* @example
* const [operation] = await client.longRunningRecognize(request);
* const [response] = await operation.promise();
*/
longRunningRecognize(request, optionsOrCallback, callback) {

@@ -350,2 +304,4 @@ request = request || {};

options = options || {};
options.otherArgs = options.otherArgs || {};
options.otherArgs.headers = options.otherArgs.headers || {};
this.initialize();

@@ -363,7 +319,4 @@ return this.innerApiCalls.longRunningRecognize(request, options, callback);

* for more details and examples.
* @example
* const decodedOperation = await checkLongRunningRecognizeProgress(name);
* console.log(decodedOperation.result);
* console.log(decodedOperation.done);
* console.log(decodedOperation.metadata);
* @example <caption>include:samples/generated/v1/speech.long_running_recognize.js</caption>
* region_tag:speech_v1_generated_Speech_LongRunningRecognize_async
*/

@@ -376,3 +329,102 @@ async checkLongRunningRecognizeProgress(name) {

}
// --------------------
// -- Path templates --
// --------------------
/**
* Return a fully-qualified customClass resource name string.
*
* @param {string} project
* @param {string} location
* @param {string} custom_class
* @returns {string} Resource name string.
*/
customClassPath(project, location, customClass) {
return this.pathTemplates.customClassPathTemplate.render({
project: project,
location: location,
custom_class: customClass,
});
}
/**
* Parse the project from CustomClass resource.
*
* @param {string} customClassName
* A fully-qualified path representing CustomClass resource.
* @returns {string} A string representing the project.
*/
matchProjectFromCustomClassName(customClassName) {
return this.pathTemplates.customClassPathTemplate.match(customClassName)
.project;
}
/**
* Parse the location from CustomClass resource.
*
* @param {string} customClassName
* A fully-qualified path representing CustomClass resource.
* @returns {string} A string representing the location.
*/
matchLocationFromCustomClassName(customClassName) {
return this.pathTemplates.customClassPathTemplate.match(customClassName)
.location;
}
/**
* Parse the custom_class from CustomClass resource.
*
* @param {string} customClassName
* A fully-qualified path representing CustomClass resource.
* @returns {string} A string representing the custom_class.
*/
matchCustomClassFromCustomClassName(customClassName) {
return this.pathTemplates.customClassPathTemplate.match(customClassName)
.custom_class;
}
/**
* Return a fully-qualified phraseSet resource name string.
*
* @param {string} project
* @param {string} location
* @param {string} phrase_set
* @returns {string} Resource name string.
*/
phraseSetPath(project, location, phraseSet) {
return this.pathTemplates.phraseSetPathTemplate.render({
project: project,
location: location,
phrase_set: phraseSet,
});
}
/**
* Parse the project from PhraseSet resource.
*
* @param {string} phraseSetName
* A fully-qualified path representing PhraseSet resource.
* @returns {string} A string representing the project.
*/
matchProjectFromPhraseSetName(phraseSetName) {
return this.pathTemplates.phraseSetPathTemplate.match(phraseSetName)
.project;
}
/**
* Parse the location from PhraseSet resource.
*
* @param {string} phraseSetName
* A fully-qualified path representing PhraseSet resource.
* @returns {string} A string representing the location.
*/
matchLocationFromPhraseSetName(phraseSetName) {
return this.pathTemplates.phraseSetPathTemplate.match(phraseSetName)
.location;
}
/**
* Parse the phrase_set from PhraseSet resource.
*
* @param {string} phraseSetName
* A fully-qualified path representing PhraseSet resource.
* @returns {string} A string representing the phrase_set.
*/
matchPhraseSetFromPhraseSetName(phraseSetName) {
return this.pathTemplates.phraseSetPathTemplate.match(phraseSetName)
.phrase_set;
}
/**
* Terminate the gRPC channel and close the client.

@@ -379,0 +431,0 @@ *

@@ -104,26 +104,281 @@ /// <reference types="node" />

getProjectId(callback: Callback<string, undefined, undefined>): void;
/**
* Create a set of phrase hints. Each item in the set can be a single word or
* a multi-word phrase. The items in the PhraseSet are favored by the
* recognition model when you send a call that includes the PhraseSet.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent resource where this phrase set will be created. Format:
*
* `projects/{project}/locations/{location}/phraseSets`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {string} request.phraseSetId
* Required. The ID to use for the phrase set, which will become the final
* component of the phrase set's resource name.
*
* This value should be 4-63 characters, and valid characters
* are /[a-z][0-9]-/.
* @param {google.cloud.speech.v1p1beta1.PhraseSet} request.phraseSet
* Required. The phrase set to create.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [PhraseSet]{@link google.cloud.speech.v1p1beta1.PhraseSet}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/adaptation.create_phrase_set.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_CreatePhraseSet_async
*/
createPhraseSet(request?: protos.google.cloud.speech.v1p1beta1.ICreatePhraseSetRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1p1beta1.IPhraseSet, protos.google.cloud.speech.v1p1beta1.ICreatePhraseSetRequest | undefined, {} | undefined]>;
createPhraseSet(request: protos.google.cloud.speech.v1p1beta1.ICreatePhraseSetRequest, options: CallOptions, callback: Callback<protos.google.cloud.speech.v1p1beta1.IPhraseSet, protos.google.cloud.speech.v1p1beta1.ICreatePhraseSetRequest | null | undefined, {} | null | undefined>): void;
createPhraseSet(request: protos.google.cloud.speech.v1p1beta1.ICreatePhraseSetRequest, callback: Callback<protos.google.cloud.speech.v1p1beta1.IPhraseSet, protos.google.cloud.speech.v1p1beta1.ICreatePhraseSetRequest | null | undefined, {} | null | undefined>): void;
/**
* Get a phrase set.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. The name of the phrase set to retrieve. Format:
*
* `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [PhraseSet]{@link google.cloud.speech.v1p1beta1.PhraseSet}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/adaptation.get_phrase_set.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_GetPhraseSet_async
*/
getPhraseSet(request?: protos.google.cloud.speech.v1p1beta1.IGetPhraseSetRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1p1beta1.IPhraseSet, protos.google.cloud.speech.v1p1beta1.IGetPhraseSetRequest | undefined, {} | undefined]>;
getPhraseSet(request: protos.google.cloud.speech.v1p1beta1.IGetPhraseSetRequest, options: CallOptions, callback: Callback<protos.google.cloud.speech.v1p1beta1.IPhraseSet, protos.google.cloud.speech.v1p1beta1.IGetPhraseSetRequest | null | undefined, {} | null | undefined>): void;
getPhraseSet(request: protos.google.cloud.speech.v1p1beta1.IGetPhraseSetRequest, callback: Callback<protos.google.cloud.speech.v1p1beta1.IPhraseSet, protos.google.cloud.speech.v1p1beta1.IGetPhraseSetRequest | null | undefined, {} | null | undefined>): void;
/**
* Update a phrase set.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1p1beta1.PhraseSet} request.phraseSet
* Required. The phrase set to update.
*
* The phrase set's `name` field is used to identify the set to be
* updated. Format:
*
* `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {google.protobuf.FieldMask} request.updateMask
* The list of fields to be updated.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [PhraseSet]{@link google.cloud.speech.v1p1beta1.PhraseSet}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/adaptation.update_phrase_set.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_UpdatePhraseSet_async
*/
updatePhraseSet(request?: protos.google.cloud.speech.v1p1beta1.IUpdatePhraseSetRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1p1beta1.IPhraseSet, protos.google.cloud.speech.v1p1beta1.IUpdatePhraseSetRequest | undefined, {} | undefined]>;
updatePhraseSet(request: protos.google.cloud.speech.v1p1beta1.IUpdatePhraseSetRequest, options: CallOptions, callback: Callback<protos.google.cloud.speech.v1p1beta1.IPhraseSet, protos.google.cloud.speech.v1p1beta1.IUpdatePhraseSetRequest | null | undefined, {} | null | undefined>): void;
updatePhraseSet(request: protos.google.cloud.speech.v1p1beta1.IUpdatePhraseSetRequest, callback: Callback<protos.google.cloud.speech.v1p1beta1.IPhraseSet, protos.google.cloud.speech.v1p1beta1.IUpdatePhraseSetRequest | null | undefined, {} | null | undefined>): void;
/**
* Delete a phrase set.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. The name of the phrase set to delete. Format:
*
* `projects/{project}/locations/{location}/phraseSets/{phrase_set}`
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/adaptation.delete_phrase_set.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_DeletePhraseSet_async
*/
deletePhraseSet(request?: protos.google.cloud.speech.v1p1beta1.IDeletePhraseSetRequest, options?: CallOptions): Promise<[protos.google.protobuf.IEmpty, protos.google.cloud.speech.v1p1beta1.IDeletePhraseSetRequest | undefined, {} | undefined]>;
deletePhraseSet(request: protos.google.cloud.speech.v1p1beta1.IDeletePhraseSetRequest, options: CallOptions, callback: Callback<protos.google.protobuf.IEmpty, protos.google.cloud.speech.v1p1beta1.IDeletePhraseSetRequest | null | undefined, {} | null | undefined>): void;
deletePhraseSet(request: protos.google.cloud.speech.v1p1beta1.IDeletePhraseSetRequest, callback: Callback<protos.google.protobuf.IEmpty, protos.google.cloud.speech.v1p1beta1.IDeletePhraseSetRequest | null | undefined, {} | null | undefined>): void;
/**
* Create a custom class.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent resource where this custom class will be created. Format:
*
* `projects/{project}/locations/{location}/customClasses`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {string} request.customClassId
* Required. The ID to use for the custom class, which will become the final
* component of the custom class' resource name.
*
* This value should be 4-63 characters, and valid characters
* are /[a-z][0-9]-/.
* @param {google.cloud.speech.v1p1beta1.CustomClass} request.customClass
* Required. The custom class to create.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [CustomClass]{@link google.cloud.speech.v1p1beta1.CustomClass}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/adaptation.create_custom_class.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_CreateCustomClass_async
*/
createCustomClass(request?: protos.google.cloud.speech.v1p1beta1.ICreateCustomClassRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1p1beta1.ICustomClass, (protos.google.cloud.speech.v1p1beta1.ICreateCustomClassRequest | undefined), {} | undefined]>;
createCustomClass(request: protos.google.cloud.speech.v1p1beta1.ICreateCustomClassRequest, options: CallOptions, callback: Callback<protos.google.cloud.speech.v1p1beta1.ICustomClass, protos.google.cloud.speech.v1p1beta1.ICreateCustomClassRequest | null | undefined, {} | null | undefined>): void;
createCustomClass(request: protos.google.cloud.speech.v1p1beta1.ICreateCustomClassRequest, callback: Callback<protos.google.cloud.speech.v1p1beta1.ICustomClass, protos.google.cloud.speech.v1p1beta1.ICreateCustomClassRequest | null | undefined, {} | null | undefined>): void;
/**
* Get a custom class.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. The name of the custom class to retrieve. Format:
*
* `projects/{project}/locations/{location}/customClasses/{custom_class}`
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [CustomClass]{@link google.cloud.speech.v1p1beta1.CustomClass}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/adaptation.get_custom_class.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_GetCustomClass_async
*/
getCustomClass(request?: protos.google.cloud.speech.v1p1beta1.IGetCustomClassRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1p1beta1.ICustomClass, protos.google.cloud.speech.v1p1beta1.IGetCustomClassRequest | undefined, {} | undefined]>;
getCustomClass(request: protos.google.cloud.speech.v1p1beta1.IGetCustomClassRequest, options: CallOptions, callback: Callback<protos.google.cloud.speech.v1p1beta1.ICustomClass, protos.google.cloud.speech.v1p1beta1.IGetCustomClassRequest | null | undefined, {} | null | undefined>): void;
getCustomClass(request: protos.google.cloud.speech.v1p1beta1.IGetCustomClassRequest, callback: Callback<protos.google.cloud.speech.v1p1beta1.ICustomClass, protos.google.cloud.speech.v1p1beta1.IGetCustomClassRequest | null | undefined, {} | null | undefined>): void;
/**
* Update a custom class.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1p1beta1.CustomClass} request.customClass
* Required. The custom class to update.
*
* The custom class's `name` field is used to identify the custom class to be
* updated. Format:
*
* `projects/{project}/locations/{location}/customClasses/{custom_class}`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {google.protobuf.FieldMask} request.updateMask
* The list of fields to be updated.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [CustomClass]{@link google.cloud.speech.v1p1beta1.CustomClass}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/adaptation.update_custom_class.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_UpdateCustomClass_async
*/
updateCustomClass(request?: protos.google.cloud.speech.v1p1beta1.IUpdateCustomClassRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1p1beta1.ICustomClass, (protos.google.cloud.speech.v1p1beta1.IUpdateCustomClassRequest | undefined), {} | undefined]>;
updateCustomClass(request: protos.google.cloud.speech.v1p1beta1.IUpdateCustomClassRequest, options: CallOptions, callback: Callback<protos.google.cloud.speech.v1p1beta1.ICustomClass, protos.google.cloud.speech.v1p1beta1.IUpdateCustomClassRequest | null | undefined, {} | null | undefined>): void;
updateCustomClass(request: protos.google.cloud.speech.v1p1beta1.IUpdateCustomClassRequest, callback: Callback<protos.google.cloud.speech.v1p1beta1.ICustomClass, protos.google.cloud.speech.v1p1beta1.IUpdateCustomClassRequest | null | undefined, {} | null | undefined>): void;
/**
* Delete a custom class.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. The name of the custom class to delete. Format:
*
* `projects/{project}/locations/{location}/customClasses/{custom_class}`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/adaptation.delete_custom_class.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_DeleteCustomClass_async
*/
deleteCustomClass(request?: protos.google.cloud.speech.v1p1beta1.IDeleteCustomClassRequest, options?: CallOptions): Promise<[protos.google.protobuf.IEmpty, (protos.google.cloud.speech.v1p1beta1.IDeleteCustomClassRequest | undefined), {} | undefined]>;
deleteCustomClass(request: protos.google.cloud.speech.v1p1beta1.IDeleteCustomClassRequest, options: CallOptions, callback: Callback<protos.google.protobuf.IEmpty, protos.google.cloud.speech.v1p1beta1.IDeleteCustomClassRequest | null | undefined, {} | null | undefined>): void;
deleteCustomClass(request: protos.google.cloud.speech.v1p1beta1.IDeleteCustomClassRequest, callback: Callback<protos.google.protobuf.IEmpty, protos.google.cloud.speech.v1p1beta1.IDeleteCustomClassRequest | null | undefined, {} | null | undefined>): void;
/**
* List phrase sets.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent, which owns this collection of phrase set. Format:
*
* `projects/{project}/locations/{location}`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize
* The maximum number of phrase sets to return. The service may return
* fewer than this value. If unspecified, at most 50 phrase sets will be
* returned. The maximum value is 1000; values above 1000 will be coerced to
* 1000.
* @param {string} request.pageToken
* A page token, received from a previous `ListPhraseSet` call.
* Provide this to retrieve the subsequent page.
*
* When paginating, all other parameters provided to `ListPhraseSet` must
* match the call that provided the page token.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is Array of [PhraseSet]{@link google.cloud.speech.v1p1beta1.PhraseSet}.
* The client library will perform auto-pagination by default: it will call the API as many
* times as needed and will merge results from all the pages into this array.
* Note that it can affect your quota.
* We recommend using `listPhraseSetAsync()`
* method described below for async iteration which you can stop as needed.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination)
* for more details and examples.
*/
listPhraseSet(request?: protos.google.cloud.speech.v1p1beta1.IListPhraseSetRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1p1beta1.IPhraseSet[], protos.google.cloud.speech.v1p1beta1.IListPhraseSetRequest | null, protos.google.cloud.speech.v1p1beta1.IListPhraseSetResponse]>;

@@ -137,5 +392,11 @@ listPhraseSet(request: protos.google.cloud.speech.v1p1beta1.IListPhraseSetRequest, options: CallOptions, callback: PaginationCallback<protos.google.cloud.speech.v1p1beta1.IListPhraseSetRequest, protos.google.cloud.speech.v1p1beta1.IListPhraseSetResponse | null | undefined, protos.google.cloud.speech.v1p1beta1.IPhraseSet>): void;

* @param {string} request.parent
* Required. The parent, which owns this collection of phrase set.
* Format:
* projects/{project}/locations/{location}
* Required. The parent, which owns this collection of phrase set. Format:
*
* `projects/{project}/locations/{location}`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize

@@ -172,5 +433,11 @@ * The maximum number of phrase sets to return. The service may return

* @param {string} request.parent
* Required. The parent, which owns this collection of phrase set.
* Format:
* projects/{project}/locations/{location}
* Required. The parent, which owns this collection of phrase set. Format:
*
* `projects/{project}/locations/{location}`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize

@@ -197,9 +464,45 @@ * The maximum number of phrase sets to return. The service may return

* for more details and examples.
* @example
* const iterable = client.listPhraseSetAsync(request);
* for await (const response of iterable) {
* // process response
* }
* @example <caption>include:samples/generated/v1p1beta1/adaptation.list_phrase_set.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_ListPhraseSet_async
*/
listPhraseSetAsync(request?: protos.google.cloud.speech.v1p1beta1.IListPhraseSetRequest, options?: CallOptions): AsyncIterable<protos.google.cloud.speech.v1p1beta1.IPhraseSet>;
/**
* List custom classes.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent, which owns this collection of custom classes. Format:
*
* `projects/{project}/locations/{location}/customClasses`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize
* The maximum number of custom classes to return. The service may return
* fewer than this value. If unspecified, at most 50 custom classes will be
* returned. The maximum value is 1000; values above 1000 will be coerced to
* 1000.
* @param {string} request.pageToken
* A page token, received from a previous `ListCustomClass` call.
* Provide this to retrieve the subsequent page.
*
* When paginating, all other parameters provided to `ListCustomClass` must
* match the call that provided the page token.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is Array of [CustomClass]{@link google.cloud.speech.v1p1beta1.CustomClass}.
* The client library will perform auto-pagination by default: it will call the API as many
* times as needed and will merge results from all the pages into this array.
* Note that it can affect your quota.
* We recommend using `listCustomClassesAsync()`
* method described below for async iteration which you can stop as needed.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination)
* for more details and examples.
*/
listCustomClasses(request?: protos.google.cloud.speech.v1p1beta1.IListCustomClassesRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1p1beta1.ICustomClass[], protos.google.cloud.speech.v1p1beta1.IListCustomClassesRequest | null, protos.google.cloud.speech.v1p1beta1.IListCustomClassesResponse]>;

@@ -213,5 +516,11 @@ listCustomClasses(request: protos.google.cloud.speech.v1p1beta1.IListCustomClassesRequest, options: CallOptions, callback: PaginationCallback<protos.google.cloud.speech.v1p1beta1.IListCustomClassesRequest, protos.google.cloud.speech.v1p1beta1.IListCustomClassesResponse | null | undefined, protos.google.cloud.speech.v1p1beta1.ICustomClass>): void;

* @param {string} request.parent
* Required. The parent, which owns this collection of custom classes.
* Format:
* {api_version}/projects/{project}/locations/{location}/customClasses
* Required. The parent, which owns this collection of custom classes. Format:
*
* `projects/{project}/locations/{location}/customClasses`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize

@@ -248,5 +557,11 @@ * The maximum number of custom classes to return. The service may return

* @param {string} request.parent
* Required. The parent, which owns this collection of custom classes.
* Format:
* {api_version}/projects/{project}/locations/{location}/customClasses
* Required. The parent, which owns this collection of custom classes. Format:
*
* `projects/{project}/locations/{location}/customClasses`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize

@@ -273,7 +588,4 @@ * The maximum number of custom classes to return. The service may return

* for more details and examples.
* @example
* const iterable = client.listCustomClassesAsync(request);
* for await (const response of iterable) {
* // process response
* }
* @example <caption>include:samples/generated/v1p1beta1/adaptation.list_custom_classes.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_ListCustomClasses_async
*/

@@ -280,0 +592,0 @@ listCustomClassesAsync(request?: protos.google.cloud.speech.v1p1beta1.IListCustomClassesRequest, options?: CallOptions): AsyncIterable<protos.google.cloud.speech.v1p1beta1.ICustomClass>;

@@ -245,31 +245,2 @@ "use strict";

}
/**
* Create a set of phrase hints. Each item in the set can be a single word or
* a multi-word phrase. The items in the PhraseSet are favored by the
* recognition model when you send a call that includes the PhraseSet.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent resource where this phrase set will be created.
* Format:
* {api_version}/projects/{project}/locations/{location}/phraseSets
* @param {string} request.phraseSetId
* Required. The ID to use for the phrase set, which will become the final
* component of the phrase set's resource name.
*
* This value should be 4-63 characters, and valid characters
 * are /[a-z][0-9]-/.
* @param {google.cloud.speech.v1p1beta1.PhraseSet} request.phraseSet
* Required. The phrase set to create.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [PhraseSet]{@link google.cloud.speech.v1p1beta1.PhraseSet}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.createPhraseSet(request);
*/
createPhraseSet(request, optionsOrCallback, callback) {

@@ -295,21 +266,2 @@ request = request || {};

}
/**
* Get a phrase set.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. The name of the phrase set to retrieve.
* Format:
* {api_version}/projects/{project}/locations/{location}/phraseSets/{phrase_set}
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [PhraseSet]{@link google.cloud.speech.v1p1beta1.PhraseSet}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.getPhraseSet(request);
*/
getPhraseSet(request, optionsOrCallback, callback) {

@@ -335,25 +287,2 @@ request = request || {};

}
/**
* Update a phrase set.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1p1beta1.PhraseSet} request.phraseSet
* Required. The phrase set to update.
*
* The phrase set's `name` field is used to identify the set to be
* updated. Format:
* {api_version}/projects/{project}/locations/{location}/phraseSets/{phrase_set}
* @param {google.protobuf.FieldMask} request.updateMask
* The list of fields to be updated.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [PhraseSet]{@link google.cloud.speech.v1p1beta1.PhraseSet}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.updatePhraseSet(request);
*/
updatePhraseSet(request, optionsOrCallback, callback) {

@@ -379,21 +308,2 @@ request = request || {};

}
/**
* Delete a phrase set.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. The name of the phrase set to delete.
* Format:
* {api_version}/projects/{project}/locations/{location}/phraseSets/{phrase_set}
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.deletePhraseSet(request);
*/
deletePhraseSet(request, optionsOrCallback, callback) {

@@ -419,29 +329,2 @@ request = request || {};

}
/**
* Create a custom class.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent resource where this custom class will be created.
* Format:
* {api_version}/projects/{project}/locations/{location}/customClasses
* @param {string} request.customClassId
* Required. The ID to use for the custom class, which will become the final
* component of the custom class' resource name.
*
* This value should be 4-63 characters, and valid characters
 * are /[a-z][0-9]-/.
* @param {google.cloud.speech.v1p1beta1.CustomClass} request.customClass
* Required. The custom class to create.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [CustomClass]{@link google.cloud.speech.v1p1beta1.CustomClass}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.createCustomClass(request);
*/
createCustomClass(request, optionsOrCallback, callback) {

@@ -467,21 +350,2 @@ request = request || {};

}
/**
* Get a custom class.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. The name of the custom class to retrieve.
* Format:
* {api_version}/projects/{project}/locations/{location}/customClasses/{custom_class}
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [CustomClass]{@link google.cloud.speech.v1p1beta1.CustomClass}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.getCustomClass(request);
*/
getCustomClass(request, optionsOrCallback, callback) {

@@ -507,25 +371,2 @@ request = request || {};

}
/**
* Update a custom class.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1p1beta1.CustomClass} request.customClass
* Required. The custom class to update.
*
* The custom class's `name` field is used to identify the custom class to be
* updated. Format:
* {api_version}/projects/{project}/locations/{location}/customClasses/{custom_class}
* @param {google.protobuf.FieldMask} request.updateMask
* The list of fields to be updated.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [CustomClass]{@link google.cloud.speech.v1p1beta1.CustomClass}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.updateCustomClass(request);
*/
updateCustomClass(request, optionsOrCallback, callback) {

@@ -551,21 +392,2 @@ request = request || {};

}
/**
* Delete a custom class.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.name
* Required. The name of the custom class to delete.
* Format:
* {api_version}/projects/{project}/locations/{location}/customClasses/{custom_class}
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [Empty]{@link google.protobuf.Empty}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.deleteCustomClass(request);
*/
deleteCustomClass(request, optionsOrCallback, callback) {

@@ -591,35 +413,2 @@ request = request || {};

}
/**
* List phrase sets.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent, which owns this collection of phrase set.
* Format:
* projects/{project}/locations/{location}
* @param {number} request.pageSize
* The maximum number of phrase sets to return. The service may return
* fewer than this value. If unspecified, at most 50 phrase sets will be
* returned. The maximum value is 1000; values above 1000 will be coerced to
* 1000.
* @param {string} request.pageToken
* A page token, received from a previous `ListPhraseSet` call.
* Provide this to retrieve the subsequent page.
*
* When paginating, all other parameters provided to `ListPhraseSet` must
* match the call that provided the page token.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is Array of [PhraseSet]{@link google.cloud.speech.v1p1beta1.PhraseSet}.
* The client library will perform auto-pagination by default: it will call the API as many
* times as needed and will merge results from all the pages into this array.
* Note that it can affect your quota.
* We recommend using `listPhraseSetAsync()`
* method described below for async iteration which you can stop as needed.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination)
* for more details and examples.
*/
listPhraseSet(request, optionsOrCallback, callback) {

@@ -650,5 +439,11 @@ request = request || {};

* @param {string} request.parent
* Required. The parent, which owns this collection of phrase set.
* Format:
* projects/{project}/locations/{location}
* Required. The parent, which owns this collection of phrase set. Format:
*
* `projects/{project}/locations/{location}`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize

@@ -686,3 +481,4 @@ * The maximum number of phrase sets to return. The service may return

});
const callSettings = new gax.CallSettings(options);
const defaultCallSettings = this._defaults['listPhraseSet'];
const callSettings = defaultCallSettings.merge(options);
this.initialize();

@@ -698,5 +494,11 @@ return this.descriptors.page.listPhraseSet.createStream(this.innerApiCalls.listPhraseSet, request, callSettings);

* @param {string} request.parent
* Required. The parent, which owns this collection of phrase set.
* Format:
* projects/{project}/locations/{location}
* Required. The parent, which owns this collection of phrase set. Format:
*
* `projects/{project}/locations/{location}`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize

@@ -723,7 +525,4 @@ * The maximum number of phrase sets to return. The service may return

* for more details and examples.
* @example
* const iterable = client.listPhraseSetAsync(request);
* for await (const response of iterable) {
* // process response
* }
* @example <caption>include:samples/generated/v1p1beta1/adaptation.list_phrase_set.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_ListPhraseSet_async
*/

@@ -739,40 +538,7 @@ listPhraseSetAsync(request, options) {

});
options = options || {};
const callSettings = new gax.CallSettings(options);
const defaultCallSettings = this._defaults['listPhraseSet'];
const callSettings = defaultCallSettings.merge(options);
this.initialize();
return this.descriptors.page.listPhraseSet.asyncIterate(this.innerApiCalls['listPhraseSet'], request, callSettings);
}
/**
* List custom classes.
*
* @param {Object} request
* The request object that will be sent.
* @param {string} request.parent
* Required. The parent, which owns this collection of custom classes.
* Format:
* {api_version}/projects/{project}/locations/{location}/customClasses
* @param {number} request.pageSize
* The maximum number of custom classes to return. The service may return
* fewer than this value. If unspecified, at most 50 custom classes will be
* returned. The maximum value is 1000; values above 1000 will be coerced to
* 1000.
* @param {string} request.pageToken
* A page token, received from a previous `ListCustomClass` call.
* Provide this to retrieve the subsequent page.
*
* When paginating, all other parameters provided to `ListCustomClass` must
* match the call that provided the page token.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is Array of [CustomClass]{@link google.cloud.speech.v1p1beta1.CustomClass}.
* The client library will perform auto-pagination by default: it will call the API as many
* times as needed and will merge results from all the pages into this array.
* Note that it can affect your quota.
* We recommend using `listCustomClassesAsync()`
* method described below for async iteration which you can stop as needed.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#auto-pagination)
* for more details and examples.
*/
listCustomClasses(request, optionsOrCallback, callback) {

@@ -803,5 +569,11 @@ request = request || {};

* @param {string} request.parent
* Required. The parent, which owns this collection of custom classes.
* Format:
* {api_version}/projects/{project}/locations/{location}/customClasses
* Required. The parent, which owns this collection of custom classes. Format:
*
* `projects/{project}/locations/{location}/customClasses`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize

@@ -839,3 +611,4 @@ * The maximum number of custom classes to return. The service may return

});
const callSettings = new gax.CallSettings(options);
const defaultCallSettings = this._defaults['listCustomClasses'];
const callSettings = defaultCallSettings.merge(options);
this.initialize();

@@ -851,5 +624,11 @@ return this.descriptors.page.listCustomClasses.createStream(this.innerApiCalls.listCustomClasses, request, callSettings);

* @param {string} request.parent
* Required. The parent, which owns this collection of custom classes.
* Format:
* {api_version}/projects/{project}/locations/{location}/customClasses
* Required. The parent, which owns this collection of custom classes. Format:
*
* `projects/{project}/locations/{location}/customClasses`
*
* Speech-to-Text supports three locations: `global`, `us` (US North America),
* and `eu` (Europe). If you are calling the `speech.googleapis.com`
* endpoint, use the `global` location. To specify a region, use a
* [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or
* `eu` location value.
* @param {number} request.pageSize

@@ -876,7 +655,4 @@ * The maximum number of custom classes to return. The service may return

* for more details and examples.
* @example
* const iterable = client.listCustomClassesAsync(request);
* for await (const response of iterable) {
* // process response
* }
* @example <caption>include:samples/generated/v1p1beta1/adaptation.list_custom_classes.js</caption>
* region_tag:speech_v1p1beta1_generated_Adaptation_ListCustomClasses_async
*/

@@ -892,4 +668,4 @@ listCustomClassesAsync(request, options) {

});
options = options || {};
const callSettings = new gax.CallSettings(options);
const defaultCallSettings = this._defaults['listCustomClasses'];
const callSettings = defaultCallSettings.merge(options);
this.initialize();

@@ -896,0 +672,0 @@ return this.descriptors.page.listCustomClasses.asyncIterate(this.innerApiCalls['listCustomClasses'], request, callSettings);

@@ -103,2 +103,23 @@ import * as gax from 'google-gax';

getProjectId(callback: Callback<string, undefined, undefined>): void;
/**
* Performs synchronous speech recognition: receive results after all audio
* has been sent and processed.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1p1beta1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
* @param {google.cloud.speech.v1p1beta1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [RecognizeResponse]{@link google.cloud.speech.v1p1beta1.RecognizeResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/speech.recognize.js</caption>
* region_tag:speech_v1p1beta1_generated_Speech_Recognize_async
*/
recognize(request?: protos.google.cloud.speech.v1p1beta1.IRecognizeRequest, options?: CallOptions): Promise<[protos.google.cloud.speech.v1p1beta1.IRecognizeResponse, protos.google.cloud.speech.v1p1beta1.IRecognizeRequest | undefined, {} | undefined]>;

@@ -120,10 +141,35 @@ recognize(request: protos.google.cloud.speech.v1p1beta1.IRecognizeRequest, options: CallOptions, callback: Callback<protos.google.cloud.speech.v1p1beta1.IRecognizeResponse, protos.google.cloud.speech.v1p1beta1.IRecognizeRequest | null | undefined, {} | null | undefined>): void;

* for more details and examples.
* @example
* const stream = client.streamingRecognize();
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* stream.write(request);
* stream.end();
* @example <caption>include:samples/generated/v1p1beta1/speech.streaming_recognize.js</caption>
* region_tag:speech_v1p1beta1_generated_Speech_StreamingRecognize_async
*/
_streamingRecognize(options?: CallOptions): gax.CancellableStream;
/**
* Performs asynchronous speech recognition: receive results via the
* google.longrunning.Operations interface. Returns either an
* `Operation.error` or an `Operation.response` which contains
* a `LongRunningRecognizeResponse` message.
* For more information on asynchronous speech recognition, see the
* [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1p1beta1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
* @param {google.cloud.speech.v1p1beta1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
* @param {google.cloud.speech.v1p1beta1.TranscriptOutputConfig} [request.outputConfig]
* Optional. Specifies an optional destination for the recognition results.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing
* a long running operation. Its `promise()` method returns a promise
* you can `await` for.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations)
* for more details and examples.
* @example <caption>include:samples/generated/v1p1beta1/speech.long_running_recognize.js</caption>
* region_tag:speech_v1p1beta1_generated_Speech_LongRunningRecognize_async
*/
longRunningRecognize(request?: protos.google.cloud.speech.v1p1beta1.ILongRunningRecognizeRequest, options?: CallOptions): Promise<[LROperation<protos.google.cloud.speech.v1p1beta1.ILongRunningRecognizeResponse, protos.google.cloud.speech.v1p1beta1.ILongRunningRecognizeMetadata>, protos.google.longrunning.IOperation | undefined, {} | undefined]>;

@@ -141,7 +187,4 @@ longRunningRecognize(request: protos.google.cloud.speech.v1p1beta1.ILongRunningRecognizeRequest, options: CallOptions, callback: Callback<LROperation<protos.google.cloud.speech.v1p1beta1.ILongRunningRecognizeResponse, protos.google.cloud.speech.v1p1beta1.ILongRunningRecognizeMetadata>, protos.google.longrunning.IOperation | null | undefined, {} | null | undefined>): void;

* for more details and examples.
* @example
* const decodedOperation = await checkLongRunningRecognizeProgress(name);
* console.log(decodedOperation.result);
* console.log(decodedOperation.done);
* console.log(decodedOperation.metadata);
* @example <caption>include:samples/generated/v1p1beta1/speech.long_running_recognize.js</caption>
* region_tag:speech_v1p1beta1_generated_Speech_LongRunningRecognize_async
*/

@@ -148,0 +191,0 @@ checkLongRunningRecognizeProgress(name: string): Promise<LROperation<protos.google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse, protos.google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata>>;

@@ -252,23 +252,2 @@ "use strict";

}
/**
* Performs synchronous speech recognition: receive results after all audio
* has been sent and processed.
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1p1beta1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
* @param {google.cloud.speech.v1p1beta1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing [RecognizeResponse]{@link google.cloud.speech.v1p1beta1.RecognizeResponse}.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#regular-methods)
* for more details and examples.
* @example
* const [response] = await client.recognize(request);
*/
recognize(request, optionsOrCallback, callback) {

@@ -285,2 +264,4 @@ request = request || {};

options = options || {};
options.otherArgs = options.otherArgs || {};
options.otherArgs.headers = options.otherArgs.headers || {};
this.initialize();

@@ -302,8 +283,4 @@ return this.innerApiCalls.recognize(request, options, callback);

* for more details and examples.
* @example
* const stream = client.streamingRecognize();
* stream.on('data', (response) => { ... });
* stream.on('end', () => { ... });
* stream.write(request);
* stream.end();
* @example <caption>include:samples/generated/v1p1beta1/speech.streaming_recognize.js</caption>
* region_tag:speech_v1p1beta1_generated_Speech_StreamingRecognize_async
*/

@@ -314,32 +291,2 @@ _streamingRecognize(options) {

}
/**
* Performs asynchronous speech recognition: receive results via the
* google.longrunning.Operations interface. Returns either an
* `Operation.error` or an `Operation.response` which contains
* a `LongRunningRecognizeResponse` message.
* For more information on asynchronous speech recognition, see the
* [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
*
* @param {Object} request
* The request object that will be sent.
* @param {google.cloud.speech.v1p1beta1.RecognitionConfig} request.config
* Required. Provides information to the recognizer that specifies how to
* process the request.
* @param {google.cloud.speech.v1p1beta1.RecognitionAudio} request.audio
* Required. The audio data to be recognized.
* @param {google.cloud.speech.v1p1beta1.TranscriptOutputConfig} [request.outputConfig]
* Optional. Specifies an optional destination for the recognition results.
* @param {object} [options]
* Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
* @returns {Promise} - The promise which resolves to an array.
* The first element of the array is an object representing
* a long running operation. Its `promise()` method returns a promise
* you can `await` for.
* Please see the
* [documentation](https://github.com/googleapis/gax-nodejs/blob/master/client-libraries.md#long-running-operations)
* for more details and examples.
* @example
* const [operation] = await client.longRunningRecognize(request);
* const [response] = await operation.promise();
*/
longRunningRecognize(request, optionsOrCallback, callback) {

@@ -356,2 +303,4 @@ request = request || {};

options = options || {};
options.otherArgs = options.otherArgs || {};
options.otherArgs.headers = options.otherArgs.headers || {};
this.initialize();

@@ -369,7 +318,4 @@ return this.innerApiCalls.longRunningRecognize(request, options, callback);

* for more details and examples.
* @example
* const decodedOperation = await checkLongRunningRecognizeProgress(name);
* console.log(decodedOperation.result);
* console.log(decodedOperation.done);
* console.log(decodedOperation.metadata);
* @example <caption>include:samples/generated/v1p1beta1/speech.long_running_recognize.js</caption>
* region_tag:speech_v1p1beta1_generated_Speech_LongRunningRecognize_async
*/

@@ -376,0 +322,0 @@ async checkLongRunningRecognizeProgress(name) {

@@ -7,2 +7,20 @@ # Changelog

## [4.10.0](https://github.com/googleapis/nodejs-speech/compare/v4.9.0...v4.10.0) (2022-01-17)
### Features
* add alternative_language_codes to RecognitionConfig ([#824](https://github.com/googleapis/nodejs-speech/issues/824)) ([f5cfad6](https://github.com/googleapis/nodejs-speech/commit/f5cfad6ba198ae109679feac84340b7f83c236fa))
* add hint boost in SpeechContext ([f5cfad6](https://github.com/googleapis/nodejs-speech/commit/f5cfad6ba198ae109679feac84340b7f83c236fa))
* add result_end_time to SpeechRecognitionResult ([#825](https://github.com/googleapis/nodejs-speech/issues/825)) ([11363fe](https://github.com/googleapis/nodejs-speech/commit/11363fe0145245ea64137196b8e68462a5c1129b))
* add SpeechAdaptation configuration ([f5cfad6](https://github.com/googleapis/nodejs-speech/commit/f5cfad6ba198ae109679feac84340b7f83c236fa))
* add spoken punctuation and spoken emojis ([f5cfad6](https://github.com/googleapis/nodejs-speech/commit/f5cfad6ba198ae109679feac84340b7f83c236fa))
* add WEBM_OPUS codec ([f5cfad6](https://github.com/googleapis/nodejs-speech/commit/f5cfad6ba198ae109679feac84340b7f83c236fa))
* add word confidence ([f5cfad6](https://github.com/googleapis/nodejs-speech/commit/f5cfad6ba198ae109679feac84340b7f83c236fa))
### Bug Fixes
* **deps:** update dependency chalk to v5 ([#823](https://github.com/googleapis/nodejs-speech/issues/823)) ([f6b48b2](https://github.com/googleapis/nodejs-speech/commit/f6b48b289f45f0aabc953dc30599ea02ad754030))
## [4.9.0](https://www.github.com/googleapis/nodejs-speech/compare/v4.8.1...v4.9.0) (2021-09-20)

@@ -9,0 +27,0 @@

{
"name": "@google-cloud/speech",
"description": "Cloud Speech Client Library for Node.js",
"version": "4.9.0",
"version": "4.10.0",
"license": "Apache-2.0",

@@ -59,7 +59,7 @@ "author": "Google Inc.",

"@types/mocha": "^8.0.0",
"@types/node": "^14.0.0",
"@types/node": "^16.0.0",
"@types/sinon": "^10.0.0",
"c8": "^7.0.0",
"codecov": "^3.0.2",
"gts": "^2.0.0",
"gts": "^3.0.0",
"jsdoc": "^3.5.5",

@@ -72,3 +72,3 @@ "jsdoc-fresh": "^1.0.1",

"pack-n-play": "^1.0.0-2",
"sinon": "^11.0.0",
"sinon": "^12.0.0",
"ts-loader": "^8.0.0",

@@ -75,0 +75,0 @@ "typescript": "^3.8.3",

@@ -7,5 +7,4 @@ [//]: # "This README.md file is auto-generated, all changes to this file will be lost."

[![release level](https://img.shields.io/badge/release%20level-general%20availability%20%28GA%29-brightgreen.svg?style=flat)](https://cloud.google.com/terms/launch-stages)
[![release level](https://img.shields.io/badge/release%20level-stable-brightgreen.svg?style=flat)](https://cloud.google.com/terms/launch-stages)
[![npm version](https://img.shields.io/npm/v/@google-cloud/speech.svg)](https://www.npmjs.org/package/@google-cloud/speech)
[![codecov](https://img.shields.io/codecov/c/github/googleapis/nodejs-speech/main.svg?style=flat)](https://codecov.io/gh/googleapis/nodejs-speech)

@@ -147,6 +146,6 @@

This library is considered to be **General Availability (GA)**. This means it
is stable; the code surface will not change in backwards-incompatible ways
This library is considered to be **stable**. The code surface will not change in backwards-incompatible ways
unless absolutely necessary (e.g. because of critical security issues) or with
an extensive deprecation period. Issues and requests against **GA** libraries
an extensive deprecation period. Issues and requests against **stable** libraries
are addressed with the highest priority.

@@ -158,2 +157,3 @@

More Information: [Google Cloud Platform Launch Stages][launch_stages]

@@ -160,0 +160,0 @@

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Socket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc