/**
 * Defines speech property ids.
 * @class PropertyId
 */
export declare enum PropertyId {
/**
 * The Cognitive Services Speech Service subscription key. If you are using an intent recognizer, you need
 * to specify the LUIS endpoint key for your particular LUIS app. Under normal circumstances, you shouldn't
 * have to use this property directly.
 * Instead, use [[SpeechConfig.fromSubscription]].
 * @member PropertyId.SpeechServiceConnection_Key
 */
SpeechServiceConnection_Key = 0,
/**
 * The Cognitive Services Speech Service endpoint (url). Under normal circumstances, you shouldn't
 * have to use this property directly.
 * Instead, use [[SpeechConfig.fromEndpoint]].
 * NOTE: This endpoint is not the same as the endpoint used to obtain an access token.
 * @member PropertyId.SpeechServiceConnection_Endpoint
 */
SpeechServiceConnection_Endpoint = 1,
/**
 * The Cognitive Services Speech Service region. Under normal circumstances, you shouldn't have to
 * use this property directly.
 * Instead, use [[SpeechConfig.fromSubscription]], [[SpeechConfig.fromEndpoint]], [[SpeechConfig.fromAuthorizationToken]].
 * @member PropertyId.SpeechServiceConnection_Region
 */
SpeechServiceConnection_Region = 2,
/**
 * The Cognitive Services Speech Service authorization token (aka access token). Under normal circumstances,
 * you shouldn't have to use this property directly.
 * Instead, use [[SpeechConfig.fromAuthorizationToken]],
 * [[SpeechRecognizer.authorizationToken]], [[IntentRecognizer.authorizationToken]], [[TranslationRecognizer.authorizationToken]].
 * @member PropertyId.SpeechServiceAuthorization_Token
 */
SpeechServiceAuthorization_Token = 3,
/**
 * The Cognitive Services Speech Service authorization type. Currently unused.
 * @member PropertyId.SpeechServiceAuthorization_Type
 */
SpeechServiceAuthorization_Type = 4,
/**
 * The Cognitive Services Speech Service endpoint id. Under normal circumstances, you shouldn't
 * have to use this property directly.
 * Instead, use [[SpeechConfig.endpointId]].
 * NOTE: The endpoint id is available in the Speech Portal, listed under Endpoint Details.
 * @member PropertyId.SpeechServiceConnection_EndpointId
 */
SpeechServiceConnection_EndpointId = 5,
/**
 * The list of comma separated languages (BCP-47 format) used as target translation languages. Under normal circumstances,
 * you shouldn't have to use this property directly.
 * Instead use [[SpeechTranslationConfig.addTargetLanguage]],
 * [[SpeechTranslationConfig.targetLanguages]], [[TranslationRecognizer.targetLanguages]].
 * @member PropertyId.SpeechServiceConnection_TranslationToLanguages
 */
SpeechServiceConnection_TranslationToLanguages = 6,
/**
 * The name of the Cognitive Service Text to Speech Service Voice. Under normal circumstances, you shouldn't have to use this
 * property directly.
 * Instead, use [[SpeechTranslationConfig.voiceName]].
 * NOTE: Valid voice names are listed in the Speech Service language-support documentation.
 * @member PropertyId.SpeechServiceConnection_TranslationVoice
 */
SpeechServiceConnection_TranslationVoice = 7,
/**
 * Translation features.
 * @member PropertyId.SpeechServiceConnection_TranslationFeatures
 */
SpeechServiceConnection_TranslationFeatures = 8,
/**
 * The Language Understanding Service Region. Under normal circumstances, you shouldn't have to use this property directly.
 * Instead, use [[LanguageUnderstandingModel]].
 * @member PropertyId.SpeechServiceConnection_IntentRegion
 */
SpeechServiceConnection_IntentRegion = 9,
/**
 * The host name of the proxy server used to connect to the Cognitive Services Speech Service. Only relevant in Node.js environments.
 * You shouldn't have to use this property directly.
 * Instead, use [[SpeechConfig.setProxy]].
 * Added in version 1.4.0.
 * @member PropertyId.SpeechServiceConnection_ProxyHostName
 */
SpeechServiceConnection_ProxyHostName = 10,
/**
 * The port of the proxy server used to connect to the Cognitive Services Speech Service. Only relevant in Node.js environments.
 * You shouldn't have to use this property directly.
 * Instead, use [[SpeechConfig.setProxy]].
 * Added in version 1.4.0.
 * @member PropertyId.SpeechServiceConnection_ProxyPort
 */
SpeechServiceConnection_ProxyPort = 11,
/**
 * The user name of the proxy server used to connect to the Cognitive Services Speech Service. Only relevant in Node.js environments.
 * You shouldn't have to use this property directly.
 * Instead, use [[SpeechConfig.setProxy]].
 * Added in version 1.4.0.
 * @member PropertyId.SpeechServiceConnection_ProxyUserName
 */
SpeechServiceConnection_ProxyUserName = 12,
/**
 * The password of the proxy server used to connect to the Cognitive Services Speech Service. Only relevant in Node.js environments.
 * You shouldn't have to use this property directly.
 * Instead, use [[SpeechConfig.setProxy]].
 * Added in version 1.4.0.
 * @member PropertyId.SpeechServiceConnection_ProxyPassword
 */
SpeechServiceConnection_ProxyPassword = 13,
/**
 * The Cognitive Services Speech Service recognition Mode. Can be "INTERACTIVE", "CONVERSATION", "DICTATION".
 * This property is intended to be read-only. The SDK is using it internally.
 * @member PropertyId.SpeechServiceConnection_RecoMode
 */
SpeechServiceConnection_RecoMode = 14,
/**
 * The spoken language to be recognized (in BCP-47 format). Under normal circumstances, you shouldn't have to use this property
 * directly.
 * Instead, use [[SpeechConfig.speechRecognitionLanguage]].
 * @member PropertyId.SpeechServiceConnection_RecoLanguage
 */
SpeechServiceConnection_RecoLanguage = 15,
/**
 * The session id. This id is a universally unique identifier (aka UUID) representing a specific binding of an audio input stream
 * and the underlying speech recognition instance to which it is bound. Under normal circumstances, you shouldn't have to use this
 * property directly.
 * Instead use [[SessionEventArgs.sessionId]].
 * @member PropertyId.Speech_SessionId
 */
Speech_SessionId = 16,
/**
 * The requested Cognitive Services Speech Service response output format (simple or detailed). Under normal circumstances, you shouldn't have
 * to use this property directly.
 * Instead use [[SpeechConfig.outputFormat]].
 * @member PropertyId.SpeechServiceResponse_RequestDetailedResultTrueFalse
 */
SpeechServiceResponse_RequestDetailedResultTrueFalse = 17,
/**
 * The requested Cognitive Services Speech Service response output profanity level. Currently unused.
 * @member PropertyId.SpeechServiceResponse_RequestProfanityFilterTrueFalse
 */
SpeechServiceResponse_RequestProfanityFilterTrueFalse = 18,
/**
 * The Cognitive Services Speech Service response output (in JSON format). This property is available on recognition result objects only.
 * @member PropertyId.SpeechServiceResponse_JsonResult
 */
SpeechServiceResponse_JsonResult = 19,
/**
 * The Cognitive Services Speech Service error details (in JSON format). Under normal circumstances, you shouldn't have to
 * use this property directly. Instead use [[CancellationDetails.errorDetails]].
 * @member PropertyId.SpeechServiceResponse_JsonErrorDetails
 */
SpeechServiceResponse_JsonErrorDetails = 20,
/**
 * The cancellation reason. Currently unused.
 * @member PropertyId.CancellationDetails_Reason
 */
CancellationDetails_Reason = 21,
/**
 * The cancellation text. Currently unused.
 * @member PropertyId.CancellationDetails_ReasonText
 */
CancellationDetails_ReasonText = 22,
/**
 * The Cancellation detailed text. Currently unused.
 * @member PropertyId.CancellationDetails_ReasonDetailedText
 */
CancellationDetails_ReasonDetailedText = 23,
/**
 * The Language Understanding Service response output (in JSON format). Available via [[IntentRecognitionResult]]
 * @member PropertyId.LanguageUnderstandingServiceResponse_JsonResult
 */
LanguageUnderstandingServiceResponse_JsonResult = 24,
/**
 * The URL string built from speech configuration.
 * This property is intended to be read-only. The SDK is using it internally.
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceConnection_Url
 */
SpeechServiceConnection_Url = 25,
/**
 * The initial silence timeout value (in milliseconds) used by the service.
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceConnection_InitialSilenceTimeoutMs
 */
SpeechServiceConnection_InitialSilenceTimeoutMs = 26,
/**
 * The end silence timeout value (in milliseconds) used by the service.
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceConnection_EndSilenceTimeoutMs
 */
SpeechServiceConnection_EndSilenceTimeoutMs = 27,
/**
 * A boolean value specifying whether audio logging is enabled in the service or not.
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceConnection_EnableAudioLogging
 */
SpeechServiceConnection_EnableAudioLogging = 28,
/**
 * The requested Cognitive Services Speech Service response output profanity setting.
 * Allowed values are "masked", "removed", and "raw".
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceResponse_ProfanityOption
 */
SpeechServiceResponse_ProfanityOption = 29,
/**
 * A string value specifying which post processing option should be used by service.
 * Allowed values are "TrueText".
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceResponse_PostProcessingOption
 */
SpeechServiceResponse_PostProcessingOption = 30,
/**
 * A boolean value specifying whether to include word-level timestamps in the response result.
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceResponse_RequestWordLevelTimestamps
 */
SpeechServiceResponse_RequestWordLevelTimestamps = 31,
/**
 * The number of times a word has to be in partial results to be returned.
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceResponse_StablePartialResultThreshold
 */
SpeechServiceResponse_StablePartialResultThreshold = 32,
/**
 * A string value specifying the output format option in the response result. Internal use only.
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceResponse_OutputFormatOption
 */
SpeechServiceResponse_OutputFormatOption = 33,
/**
 * A boolean value to request for stabilizing translation partial results by omitting words in the end.
 * Added in version 1.7.0.
 * @member PropertyId.SpeechServiceResponse_TranslationRequestStablePartialResult
 */
SpeechServiceResponse_TranslationRequestStablePartialResult = 34,
/**
 * Identifier used to connect to the backend service.
 * @member PropertyId.Conversation_ApplicationId
 */
Conversation_ApplicationId = 35,
/**
 * Type of dialog backend to connect to.
 * @member PropertyId.Conversation_DialogType
 */
Conversation_DialogType = 36,
/**
 * Silence timeout for listening.
 * @member PropertyId.Conversation_Initial_Silence_Timeout
 */
Conversation_Initial_Silence_Timeout = 37,
/**
 * From Id to add to speech recognition activities.
 * @member PropertyId.Conversation_From_Id
 */
Conversation_From_Id = 38
}