diff --git a/.mock/definition/empathic-voice/__package__.yml b/.mock/definition/empathic-voice/__package__.yml index cb0abcfc..8fc8572e 100644 --- a/.mock/definition/empathic-voice/__package__.yml +++ b/.mock/definition/empathic-voice/__package__.yml @@ -1728,6 +1728,7 @@ types: docs: When provided, the input is spoken by EVI. properties: type: + display-name: Type type: literal<"assistant_input"> docs: >- The type of message sent through the socket; must be `assistant_input` @@ -1738,6 +1739,7 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: AssistantInput text: type: string docs: >- @@ -1756,6 +1758,7 @@ types: AudioConfiguration: properties: encoding: + display-name: Encoding type: Encoding docs: Encoding format of the audio input, such as `linear16`. channels: @@ -1772,6 +1775,7 @@ types: docs: When provided, the input is audio. properties: type: + display-name: Type type: literal<"audio_input"> docs: >- The type of message sent through the socket; must be `audio_input` for @@ -1787,6 +1791,7 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: AudioInput data: type: string docs: >- @@ -1807,19 +1812,23 @@ types: source: openapi: assistant-asyncapi.json BuiltInTool: + display-name: BuiltInTool type: literal<"web_search"> docs: >- Name of the built-in tool. Set to `web_search` to equip EVI with the built-in Web Search tool. BuiltinToolConfig: properties: - name: BuiltInTool + name: + display-name: BuiltInTool + type: BuiltInTool fallback_content: type: optional docs: >- Optional text passed to the supplemental LLM if the tool call fails. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. 
+ display-name: BuiltinToolConfig source: openapi: assistant-asyncapi.json Context: @@ -1844,6 +1853,7 @@ types: - **Editable**: The original context is updated to reflect the new context. If the type is not specified, it will default to `temporary`. + display-name: Context text: type: string docs: >- @@ -1866,14 +1876,19 @@ types: - temporary source: openapi: assistant-asyncapi.json - Encoding: literal<"linear16"> - ErrorLevel: literal<"warn"> + Encoding: + display-name: Encoding + type: literal<"linear16"> + ErrorLevel: + display-name: ErrorLevel + type: literal<"warn"> PauseAssistantMessage: docs: >- Pause responses from EVI. Chat history is still saved and sent after resuming. properties: type: + display-name: Type type: literal<"pause_assistant_message"> docs: >- The type of message sent through the socket; must be @@ -1891,6 +1906,7 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: PauseAssistantMessage source: openapi: assistant-asyncapi.json ResumeAssistantMessage: @@ -1899,6 +1915,7 @@ types: sent. properties: type: + display-name: Type type: literal<"resume_assistant_message"> docs: >- The type of message sent through the socket; must be @@ -1917,12 +1934,14 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: ResumeAssistantMessage source: openapi: assistant-asyncapi.json SessionSettings: docs: Settings for this chat session. properties: type: + display-name: Type type: literal<"session_settings"> docs: >- The type of message sent through the socket; must be @@ -1957,6 +1976,7 @@ types: language model](/docs/empathic-voice-interface-evi/custom-language-model) with EVI to learn more. 
+ display-name: SessionSettings system_prompt: type: optional docs: >- @@ -1979,6 +1999,7 @@ types: For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). + display-name: SessionSettings context: type: optional docs: >- @@ -1993,6 +2014,7 @@ types: Set to `null` to disable context injection. + display-name: SessionSettings audio: type: optional docs: >- @@ -2006,6 +2028,7 @@ types: Linear 16 audio, please refer to the [Session Settings section](/docs/empathic-voice-interface-evi/configuration#session-settings) on the EVI Configuration page. + display-name: SessionSettings language_model_api_key: type: optional docs: >- @@ -2015,6 +2038,7 @@ types: When provided, EVI will use this key instead of Hume’s API key for the supplemental LLM. This allows you to bypass rate limits and utilize your own API key as needed. + display-name: SessionSettings tools: type: optional> docs: >- @@ -2026,6 +2050,7 @@ types: search, are natively integrated, while user-defined tools are created and invoked by the user. To learn more, see our [Tool Use Guide](/docs/empathic-voice-interface-evi/tool-use). + display-name: SessionSettings builtin_tools: type: optional> docs: >- @@ -2042,15 +2067,20 @@ types: Currently, the only built-in tool Hume provides is **Web Search**. When enabled, Web Search equips EVI with the ability to search the web for up-to-date information. - metadata: optional> + display-name: SessionSettings + metadata: + type: optional> + display-name: SessionSettings variables: type: optional> docs: Dynamic values that can be used to populate EVI prompts. + display-name: SessionSettings source: openapi: assistant-asyncapi.json Tool: properties: type: + display-name: ToolType type: ToolType docs: Type of tool. Set to `function` for user-defined tools. name: @@ -2071,18 +2101,21 @@ types: docs: >- An optional description of what the tool does, used by the supplemental LLM to choose when and how to call the function. 
+ display-name: Tool fallback_content: type: optional docs: >- Optional text passed to the supplemental LLM if the tool call fails. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation. + display-name: Tool source: openapi: assistant-asyncapi.json ToolErrorMessage: docs: When provided, the output is a function call error. properties: type: + display-name: Type type: literal<"tool_error"> docs: >- The type of message sent through the socket; for a Tool Error message, @@ -2098,11 +2131,13 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: ToolErrorMessage tool_type: type: optional docs: >- Type of tool called. Either `builtin` for natively implemented tools, like web search, or `function` for user-defined tools. + display-name: ToolErrorMessage tool_call_id: type: string docs: >- @@ -2120,23 +2155,27 @@ types: Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the tool errors. + display-name: ToolErrorMessage error: type: string docs: Error message from the tool call, not exposed to the LLM or user. code: type: optional docs: Error code. Identifies the type of error encountered. + display-name: ToolErrorMessage level: type: optional docs: >- Indicates the severity of an error; for a Tool Error message, this must be `warn` to signal an unexpected event. + display-name: ToolErrorMessage source: openapi: assistant-asyncapi.json ToolResponseMessage: docs: When provided, the output is a function call response. 
properties: type: + display-name: Type type: literal<"tool_response"> docs: >- The type of message sent through the socket; for a Tool Response @@ -2152,6 +2191,7 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: ToolResponseMessage tool_call_id: type: string docs: >- @@ -2178,11 +2218,13 @@ types: which tool generated the response. The specified `tool_name` must match the one received in the [Tool Call message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type). + display-name: ToolResponseMessage tool_type: type: optional docs: >- Type of tool called. Either `builtin` for natively implemented tools, like web search, or `function` for user-defined tools. + display-name: ToolResponseMessage source: openapi: assistant-asyncapi.json ToolType: @@ -2195,6 +2237,7 @@ types: docs: User text to insert into the conversation. properties: type: + display-name: Type type: literal<"user_input"> docs: >- The type of message sent through the socket; must be `user_input` for @@ -2205,6 +2248,7 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: UserInput text: type: string docs: >- @@ -2222,6 +2266,7 @@ types: docs: When provided, the output is an assistant end message. properties: type: + display-name: Type type: literal<"assistant_end"> docs: >- The type of message sent through the socket; for an Assistant End @@ -2236,12 +2281,14 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: AssistantEnd source: openapi: assistant-asyncapi.json AssistantMessage: docs: When provided, the output is an assistant message. 
properties: type: + display-name: Type type: literal<"assistant_message"> docs: >- The type of message sent through the socket; for an Assistant Message, @@ -2256,15 +2303,19 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: AssistantMessage id: type: optional docs: >- ID of the assistant message. Allows the Assistant Message to be tracked and referenced. + display-name: AssistantMessage message: + display-name: ChatMessage type: ChatMessage docs: Transcript of the message. models: + display-name: Inference type: Inference docs: Inference model results. from_text: @@ -2279,6 +2330,7 @@ types: docs: When provided, the output is audio. properties: type: + display-name: Type type: literal<"audio_output"> docs: >- The type of message sent through the socket; for an Audio Output @@ -2288,6 +2340,7 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: AudioOutput id: type: string docs: >- @@ -2305,30 +2358,37 @@ types: discriminated: false docs: Function call response from client. union: - - ToolResponseMessage - - ToolErrorMessage + - display-name: ToolResponseMessage + type: ToolResponseMessage + - display-name: ToolErrorMessage + type: ToolErrorMessage source: openapi: assistant-asyncapi.json ChatMessage: properties: role: + display-name: Role type: Role docs: Role of who is providing the message. content: type: optional docs: Transcript of the message. + display-name: ChatMessage tool_call: type: optional docs: Function call name and arguments. + display-name: ChatMessage tool_result: type: optional docs: Function call response from client. + display-name: ChatMessage source: openapi: assistant-asyncapi.json ChatMetadata: docs: When provided, the output is a chat metadata message. 
properties: type: + display-name: Type type: literal<"chat_metadata"> docs: >- The type of message sent through the socket; for a Chat Metadata @@ -2343,6 +2403,7 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: ChatMetadata chat_group_id: type: string docs: >- @@ -2425,6 +2486,7 @@ types: docs: When provided, the output is an error message. properties: type: + display-name: Type type: literal<"error"> docs: >- The type of message sent through the socket; for a Web Socket Error @@ -2439,6 +2501,7 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: Error code: type: string docs: Error code. Identifies the type of error encountered. @@ -2478,6 +2541,7 @@ types: ProsodyInference: properties: scores: + display-name: EmotionScores type: EmotionScores docs: >- The confidence scores for 48 emotions within the detected expression @@ -2528,6 +2592,7 @@ types: invocation, ensuring that the correct response is linked to the appropriate request. type: + display-name: Type type: literal<"tool_call"> docs: >- The type of message sent through the socket; for a Tool Call message, @@ -2541,11 +2606,13 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: ToolCallMessage tool_type: type: optional docs: >- Type of tool called. Either `builtin` for natively implemented tools, like web search, or `function` for user-defined tools. + display-name: ToolCallMessage response_required: type: boolean docs: >- @@ -2560,6 +2627,7 @@ types: docs: When provided, the output is an interruption. 
properties: type: + display-name: Type type: literal<"user_interruption"> docs: >- The type of message sent through the socket; for a User Interruption @@ -2576,6 +2644,7 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: UserInterruption time: type: integer docs: Unix timestamp of the detected user interruption. @@ -2585,6 +2654,7 @@ types: docs: When provided, the output is a user message. properties: type: + display-name: Type type: literal<"user_message"> docs: >- The type of message sent through the socket; for a User Message, this @@ -2604,13 +2674,17 @@ types: docs: >- Used to manage conversational state, correlate frontend and backend data, and persist conversations across EVI sessions. + display-name: UserMessage message: + display-name: ChatMessage type: ChatMessage docs: Transcript of the message. models: + display-name: Inference type: Inference docs: Inference model results. time: + display-name: MillisecondInterval type: MillisecondInterval docs: Start and End time of user message. 
from_text: @@ -2625,15 +2699,24 @@ types: JsonMessage: discriminated: false union: - - AssistantEnd - - AssistantMessage - - ChatMetadata - - WebSocketError - - UserInterruption - - UserMessage - - ToolCallMessage - - ToolResponseMessage - - ToolErrorMessage + - display-name: AssistantEnd + type: AssistantEnd + - display-name: AssistantMessage + type: AssistantMessage + - display-name: ChatMetadata + type: ChatMetadata + - display-name: Error + type: WebSocketError + - display-name: UserInterruption + type: UserInterruption + - display-name: UserMessage + type: UserMessage + - display-name: ToolCallMessage + type: ToolCallMessage + - display-name: ToolResponseMessage + type: ToolResponseMessage + - display-name: ToolErrorMessage + type: ToolErrorMessage source: openapi: assistant-asyncapi.json TtsInput: @@ -2654,12 +2737,16 @@ types: ExtendedVoiceArgs: properties: text: string - voice_args: VoiceArgs + voice_args: + display-name: VoiceArgs + type: VoiceArgs source: openapi: assistant-openapi.json HTTPValidationError: properties: - detail: optional> + detail: + type: optional> + display-name: HTTPValidationError source: openapi: assistant-openapi.json ValidationErrorLocItem: @@ -2671,20 +2758,26 @@ types: openapi: assistant-openapi.json ValidationError: properties: - loc: list + loc: + display-name: Location + type: list msg: string type: string source: openapi: assistant-openapi.json VoiceArgs: properties: - voice: optional + voice: + type: optional + display-name: VoiceArgs baseline: type: optional default: false + display-name: VoiceArgs reconstruct: type: optional default: false + display-name: VoiceArgs source: openapi: assistant-openapi.json VoiceNameEnum: diff --git a/.mock/definition/empathic-voice/chat.yml b/.mock/definition/empathic-voice/chat.yml index c99729ae..696f82b9 100644 --- a/.mock/definition/empathic-voice/chat.yml +++ b/.mock/definition/empathic-voice/chat.yml @@ -119,28 +119,46 @@ types: SubscribeEvent: discriminated: false union: - - 
root.AssistantEnd - - root.AssistantMessage - - root.AudioOutput - - root.ChatMetadata - - root.WebSocketError - - root.UserInterruption - - root.UserMessage - - root.ToolCallMessage - - root.ToolResponseMessage - - root.ToolErrorMessage + - display-name: AssistantEnd + type: root.AssistantEnd + - display-name: AssistantMessage + type: root.AssistantMessage + - display-name: AudioOutput + type: root.AudioOutput + - display-name: ChatMetadata + type: root.ChatMetadata + - display-name: Error + type: root.WebSocketError + - display-name: UserInterruption + type: root.UserInterruption + - display-name: UserMessage + type: root.UserMessage + - display-name: ToolCallMessage + type: root.ToolCallMessage + - display-name: ToolResponseMessage + type: root.ToolResponseMessage + - display-name: ToolErrorMessage + type: root.ToolErrorMessage source: openapi: assistant-asyncapi.json PublishEvent: discriminated: false union: - - root.AudioInput - - root.SessionSettings - - root.UserInput - - root.AssistantInput - - root.ToolResponseMessage - - root.ToolErrorMessage - - root.PauseAssistantMessage - - root.ResumeAssistantMessage + - display-name: AudioInput + type: root.AudioInput + - display-name: SessionSettings + type: root.SessionSettings + - display-name: UserInput + type: root.UserInput + - display-name: AssistantInput + type: root.AssistantInput + - display-name: ToolResponseMessage + type: root.ToolResponseMessage + - display-name: ToolErrorMessage + type: root.ToolErrorMessage + - display-name: PauseAssistantMessage + type: root.PauseAssistantMessage + - display-name: ResumeAssistantMessage + type: root.ResumeAssistantMessage source: openapi: assistant-asyncapi.json diff --git a/.mock/definition/expression-measurement/batch/__package__.yml b/.mock/definition/expression-measurement/batch/__package__.yml index 8fb80472..91d4078c 100644 --- a/.mock/definition/expression-measurement/batch/__package__.yml +++ b/.mock/definition/expression-measurement/batch/__package__.yml @@ 
-427,9 +427,11 @@ types: properties: time: TimeInterval emotions: + display-name: Emotions Scores docs: A high-dimensional embedding in emotion space. type: list descriptions: + display-name: Descriptions Scores docs: Modality-specific descriptive features and their scores. type: list source: @@ -647,6 +649,7 @@ types: docs: The predicted probability that a detected face was actually a face. box: BoundingBox emotions: + display-name: Emotions Scores docs: A high-dimensional embedding in emotion space. type: list facs: @@ -660,6 +663,7 @@ types: FacemeshPrediction: properties: emotions: + display-name: Emotions Scores docs: A high-dimensional embedding in emotion space. type: list source: @@ -1020,6 +1024,7 @@ types: Value between `0.0` and `1.0` that indicates our transcription model's relative confidence that this text was spoken by this speaker. emotions: + display-name: Emotions Scores docs: A high-dimensional embedding in emotion space. type: list sentiment: @@ -1117,6 +1122,7 @@ types: Value between `0.0` and `1.0` that indicates our transcription model's relative confidence that this text was spoken by this speaker. emotions: + display-name: Emotions Scores docs: A high-dimensional embedding in emotion space. type: list source: @@ -1211,6 +1217,7 @@ types: Value between `0.0` and `1.0` that indicates our transcription model's relative confidence that this text was spoken by this speaker. emotions: + display-name: Emotions Scores docs: A high-dimensional embedding in emotion space. 
type: list source: @@ -1251,176 +1258,169 @@ types: source: openapi: batch-openapi.json Source: - discriminated: false + discriminant: type + base-properties: {} union: - - SourceUrl - - SourceFile - - SourceTextSource + url: SourceUrl + file: SourceFile + text: SourceTextSource source: openapi: batch-openapi.json SourceFile: - properties: - type: literal<"file"> + properties: {} extends: - File source: openapi: batch-openapi.json SourceTextSource: - properties: - type: literal<"text"> + properties: {} source: openapi: batch-openapi.json SourceUrl: - properties: - type: literal<"url"> + properties: {} extends: - Url source: openapi: batch-openapi.json + Url: + properties: + url: + type: string + docs: The URL of the source media file. + source: + openapi: batch-openapi.json StateEmbeddingGeneration: - discriminated: false + discriminant: status + base-properties: {} union: - - StateEmbeddingGenerationQueued - - StateEmbeddingGenerationInProgress - - StateEmbeddingGenerationCompletedEmbeddingGeneration - - StateEmbeddingGenerationFailed + QUEUED: StateEmbeddingGenerationQueued + IN_PROGRESS: StateEmbeddingGenerationInProgress + COMPLETED: StateEmbeddingGenerationCompletedEmbeddingGeneration + FAILED: StateEmbeddingGenerationFailed source: openapi: batch-openapi.json StateEmbeddingGenerationCompletedEmbeddingGeneration: - properties: - status: literal<"COMPLETED"> + properties: {} extends: - CompletedEmbeddingGeneration source: openapi: batch-openapi.json StateEmbeddingGenerationFailed: - properties: - status: literal<"FAILED"> + properties: {} extends: - Failed source: openapi: batch-openapi.json StateEmbeddingGenerationInProgress: - properties: - status: literal<"IN_PROGRESS"> + properties: {} extends: - InProgress source: openapi: batch-openapi.json StateEmbeddingGenerationQueued: - properties: - status: literal<"QUEUED"> + properties: {} extends: - Queued source: openapi: batch-openapi.json StateInference: - discriminated: false + discriminant: status + 
base-properties: {} union: - - QueuedState - - InProgressState - - CompletedState - - FailedState + QUEUED: QueuedState + IN_PROGRESS: InProgressState + COMPLETED: CompletedState + FAILED: FailedState source: openapi: batch-openapi.json CompletedState: - properties: - status: literal<"COMPLETED"> + properties: {} extends: - CompletedInference source: openapi: batch-openapi.json FailedState: - properties: - status: literal<"FAILED"> + properties: {} extends: - Failed source: openapi: batch-openapi.json InProgressState: - properties: - status: literal<"IN_PROGRESS"> + properties: {} extends: - InProgress source: openapi: batch-openapi.json QueuedState: - properties: - status: literal<"QUEUED"> + properties: {} extends: - Queued source: openapi: batch-openapi.json StateTlInference: - discriminated: false + discriminant: status + base-properties: {} union: - - StateTlInferenceQueued - - StateTlInferenceInProgress - - StateTlInferenceCompletedTlInference - - StateTlInferenceFailed + QUEUED: StateTlInferenceQueued + IN_PROGRESS: StateTlInferenceInProgress + COMPLETED: StateTlInferenceCompletedTlInference + FAILED: StateTlInferenceFailed source: openapi: batch-openapi.json StateTlInferenceCompletedTlInference: - properties: - status: literal<"COMPLETED"> + properties: {} extends: - CompletedTlInference source: openapi: batch-openapi.json StateTlInferenceFailed: - properties: - status: literal<"FAILED"> + properties: {} extends: - Failed source: openapi: batch-openapi.json StateTlInferenceInProgress: - properties: - status: literal<"IN_PROGRESS"> + properties: {} extends: - InProgress source: openapi: batch-openapi.json StateTlInferenceQueued: - properties: - status: literal<"QUEUED"> + properties: {} extends: - Queued source: openapi: batch-openapi.json StateTraining: - discriminated: false + discriminant: status + base-properties: {} union: - - StateTrainingQueued - - StateTrainingInProgress - - StateTrainingCompletedTraining - - StateTrainingFailed + QUEUED: 
StateTrainingQueued + IN_PROGRESS: StateTrainingInProgress + COMPLETED: StateTrainingCompletedTraining + FAILED: StateTrainingFailed source: openapi: batch-openapi.json StateTrainingCompletedTraining: - properties: - status: literal<"COMPLETED"> + properties: {} extends: - CompletedTraining source: openapi: batch-openapi.json StateTrainingFailed: - properties: - status: literal<"FAILED"> + properties: {} extends: - Failed source: openapi: batch-openapi.json StateTrainingInProgress: - properties: - status: literal<"IN_PROGRESS"> + properties: {} extends: - InProgress source: openapi: batch-openapi.json StateTrainingQueued: - properties: - status: literal<"QUEUED"> + properties: {} extends: - Queued source: @@ -1472,20 +1472,19 @@ types: source: openapi: batch-openapi.json Task: - discriminated: false + discriminant: type + base-properties: {} union: - - TaskClassification - - TaskRegression + classification: TaskClassification + regression: TaskRegression source: openapi: batch-openapi.json TaskClassification: - properties: - type: literal<"classification"> + properties: {} source: openapi: batch-openapi.json TaskRegression: - properties: - type: literal<"regression"> + properties: {} source: openapi: batch-openapi.json TextSource: map @@ -1730,13 +1729,6 @@ types: source: openapi: batch-openapi.json UnionPredictResult: InferenceSourcePredictResult - Url: - properties: - url: - type: string - docs: The URL of the source media file. 
- source: - openapi: batch-openapi.json ValidationArgs: properties: positive_label: optional diff --git a/.mock/definition/expression-measurement/stream/__package__.yml b/.mock/definition/expression-measurement/stream/__package__.yml index a279594e..f155235f 100644 --- a/.mock/definition/expression-measurement/stream/__package__.yml +++ b/.mock/definition/expression-measurement/stream/__package__.yml @@ -12,6 +12,7 @@ channel: publish: origin: client body: + display-name: Models endpoint payload type: StreamModelsEndpointPayload docs: Models endpoint payload examples: @@ -119,26 +120,33 @@ types: docs: > If a payload ID was passed in the request, the same payload ID will be sent back in the response body. + display-name: Model predictions job_details: type: optional docs: > If the job_details flag was set in the request, details about the current streaming job will be returned in the response body. + display-name: Model predictions burst: type: optional docs: Response for the vocal burst emotion model. + display-name: Model predictions face: type: optional docs: Response for the facial expression emotion model. + display-name: Model predictions facemesh: type: optional docs: Response for the facemesh emotion model. + display-name: Model predictions language: type: optional docs: Response for the language emotion model. + display-name: Model predictions prosody: type: optional docs: Response for the speech prosody emotion model. + display-name: Model predictions source: openapi: streaming-asyncapi.yml JobDetails: @@ -157,19 +165,23 @@ types: error: type: optional docs: Error message text. + display-name: Error message code: type: optional docs: Unique identifier for the error. + display-name: Error message payload_id: type: optional docs: > If a payload ID was passed in the request, the same payload ID will be sent back in the response body. 
+ display-name: Error message job_details: type: optional docs: > If the job_details flag was set in the request, details about the current streaming job will be returned in the response body. + display-name: Error message source: openapi: streaming-asyncapi.yml StreamWarningMessageJobDetails: @@ -188,29 +200,36 @@ types: warning: type: optional docs: Warning message text. + display-name: Warning message code: type: optional docs: Unique identifier for the error. + display-name: Warning message payload_id: type: optional docs: > If a payload ID was passed in the request, the same payload ID will be sent back in the response body. + display-name: Warning message job_details: type: optional docs: > If the job_details flag was set in the request, details about the current streaming job will be returned in the response body. + display-name: Warning message source: openapi: streaming-asyncapi.yml SubscribeEvent: discriminated: false union: - - type: StreamModelPredictions + - display-name: Model predictions + type: StreamModelPredictions docs: Model predictions - - type: StreamErrorMessage + - display-name: Error message + type: StreamErrorMessage docs: Error message - - type: StreamWarningMessage + - display-name: Warning message + type: StreamWarningMessage docs: Warning message source: openapi: streaming-asyncapi.yml @@ -331,12 +350,15 @@ types: StreamModelsEndpointPayload: docs: Models endpoint payload properties: - data: optional + data: + type: optional + display-name: Models endpoint payload models: type: optional docs: > Configuration used to specify which models should be used and with what settings. + display-name: Models endpoint payload stream_window_ms: type: optional docs: > @@ -364,6 +386,7 @@ types: validation: min: 500 max: 10000 + display-name: Models endpoint payload reset_stream: type: optional docs: > @@ -378,6 +401,7 @@ types: Use reset_stream when one audio file is done being processed and you do not want context to leak across files. 
default: false + display-name: Models endpoint payload raw_text: type: optional docs: > @@ -388,6 +412,7 @@ types: the language model, but it cannot be used with other file types like audio, image, or video. default: false + display-name: Models endpoint payload job_details: type: optional docs: > @@ -401,6 +426,7 @@ types: This parameter is useful to get the unique job ID. default: false + display-name: Models endpoint payload payload_id: type: optional docs: > @@ -411,8 +437,13 @@ types: This can be useful if you have multiple requests running asynchronously and want to disambiguate responses as they are received. - face: optional - language: optional + display-name: Models endpoint payload + face: + type: optional + display-name: Models endpoint payload + language: + type: optional + display-name: Models endpoint payload source: openapi: streaming-asyncapi.yml EmotionEmbeddingItem: diff --git a/.mock/fern.config.json b/.mock/fern.config.json index ba052cad..7e7445ee 100644 --- a/.mock/fern.config.json +++ b/.mock/fern.config.json @@ -1,4 +1,4 @@ { "organization" : "hume", - "version" : "0.41.9" + "version" : "0.41.16" } \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index d4f88207..7e475461 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1056,13 +1056,13 @@ notebook = "*" [[package]] name = "jupyter-client" -version = "8.6.2" +version = "8.6.3" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_client-8.6.2-py3-none-any.whl", hash = "sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f"}, - {file = "jupyter_client-8.6.2.tar.gz", hash = "sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df"}, + {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"}, + {file = "jupyter_client-8.6.3.tar.gz", hash = 
"sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"}, ] [package.dependencies] @@ -1827,18 +1827,18 @@ files = [ [[package]] name = "pydantic" -version = "2.9.1" +version = "2.9.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, - {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.23.3" +pydantic-core = "2.23.4" typing-extensions = [ {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, {version = ">=4.6.1", markers = "python_version < \"3.13\""}, @@ -1850,100 +1850,100 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.23.3" +version = "2.23.4" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, - {file = 
"pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, - {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, - {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, - {file = 
"pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, - {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, - {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, - {file = 
"pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, - {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, - {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, - {file = 
"pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, - {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, - {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, - {file = 
"pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, - {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, - {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, - {file = 
"pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, - {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, - {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, - {file = 
"pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, - {file = "pydantic_core-2.23.3.tar.gz", 
hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = 
"sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = 
"sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = 
"sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = 
"sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + 
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = 
"pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, ] [package.dependencies] @@ -3118,10 +3118,11 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", type = ["pytest-mypy"] [extras] +examples = ["jupyter"] legacy = ["pydub"] microphone = ["pydub", "simpleaudio", "sounddevice"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "57f224588709bb4851d574103df63389191c95207cf5e1a2d2df4e19475a8a3f" +content-hash = "6c05f984cb5e08248fe440717430eba30a9ceff1f453583f6f7576cf65ca0979" diff --git a/pyproject.toml b/pyproject.toml index 3c8bf8d7..a81b1c38 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,24 +1,9 @@ [tool.poetry] -authors = [] -classifiers = [ - "Intended Audience :: Developers", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Operating System :: OS Independent", - "Operating System :: POSIX", - "Operating System :: MacOS", - "Operating System :: POSIX :: Linux", - "Operating System :: Microsoft :: Windows", - "Topic :: Software Development :: Libraries :: Python 
Modules", - "Typing :: Typed", - "License :: OSI Approved :: MIT License", -] +name = "hume" +version = "0.6.1" description = "A Python SDK for Hume AI" +readme = "README.md" +authors = [] keywords = [ "hume", "ai", @@ -39,13 +24,30 @@ keywords = [ "expressive", "embeddings", "communication", - "learning", + "learning" ] license = "MIT" -name = "hume" -packages = [{ include = "hume", from = "src" }] -readme = "README.md" -version = "0.7.0-rc3" +classifiers = [ + "Intended Audience :: Developers", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Operating System :: OS Independent", + "Operating System :: POSIX", + "Operating System :: MacOS", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Topic :: Software Development :: Libraries :: Python Modules", + "Typing :: Typed", + "License :: OSI Approved :: MIT License" +] +packages = [ + { include = "hume", from = "src"} +] [project.urls] Documentation = 'https://dev.hume.ai' @@ -53,38 +55,38 @@ Homepage = 'https://www.hume.ai/' Repository = 'https://github.com/HumeAI/hume-python-sdk' [tool.poetry.dependencies] +python = ">=3.9,<4" aiofiles = "^24.1.0" eval-type-backport = "^0.2.0" httpx = ">=0.21.2" -jupyter = { version = "^1.0.0", optional = true } +jupyter = { version = "^1.0.0", optional = true} pydantic = ">= 1.9.2" pydantic-core = "^2.18.2" -pydub = { version = "^0.25.1", optional = true } -python = ">=3.9,<4" -simpleaudio = { version = "^1.0.4", optional = true } -sounddevice = { version = "^0.4.6", optional = true } +pydub = { version = "^0.25.1", optional = true} +simpleaudio = { version = "^1.0.4", optional = true} +sounddevice = { version = "^0.4.6", optional = true} typing_extensions = ">= 4.0.0" websockets = "12.0" 
[tool.poetry.dev-dependencies] -covcheck = { version = "^0.4.3", extras = ["toml"] } mypy = "1.0.1" +pytest = "^7.4.0" +pytest-asyncio = "^0.23.5" +python-dateutil = "^2.9.0" types-python-dateutil = "^2.9.0.20240316" +covcheck = { version = "^0.4.3", extras = ["toml"]} pydocstyle = "^6.1.1" pydub-stubs = "^0.25.1" pylint = "^2.16.2" -pytest = "^7.4.0" -pytest-asyncio = "^0.23.5" pytest-cov = "^4.0.0" -python-dateutil = "^2.9.0" ruff = "^0.5.6" semver = "^2.13.0" testbook = "^0.4.2" types-aiofiles = "^24.1.0.20240626" [tool.pytest.ini_options] +testpaths = [ "tests" ] asyncio_mode = "auto" -testpaths = ["tests"] [tool.mypy] plugins = ["pydantic.mypy"] @@ -92,10 +94,12 @@ plugins = ["pydantic.mypy"] [tool.ruff] line-length = 120 + [build-system] -build-backend = "poetry.core.masonry.api" requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" [tool.poetry.extras] -legacy = ["pydub"] -microphone = ["pydub", "simpleaudio", "sounddevice"] +examples=["jupyter"] +microphone=["pydub", "simpleaudio", "sounddevice"] +legacy=["pydub"] diff --git a/reference.md b/reference.md index 17e7c182..44bfdb9f 100644 --- a/reference.md +++ b/reference.md @@ -1,23 +1,9 @@ # Reference -## ExpressionMeasurement Batch -
client.expression_measurement.batch.list_jobs(...) -
-
- -#### 📝 Description - -
-
- +## EmpathicVoice Tools +
client.empathic_voice.tools.list_tools(...)
-Sort and filter jobs. -
-
-
-
- #### 🔌 Usage
@@ -32,7 +18,15 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.list_jobs() +response = client.empathic_voice.tools.list_tools( + page_number=0, + page_size=2, +) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ```
@@ -48,33 +42,11 @@ client.expression_measurement.batch.list_jobs()
-**limit:** `typing.Optional[int]` — The maximum number of jobs to include in the response. - -
-
- -
-
- -**status:** `typing.Optional[typing.Union[Status, typing.Sequence[Status]]]` - -Include only jobs of this status in the response. There are four possible statuses: - -- `QUEUED`: The job has been received and is waiting to be processed. - -- `IN_PROGRESS`: The job is currently being processed. - -- `COMPLETED`: The job has finished processing. - -- `FAILED`: The job encountered an error and could not be completed successfully. - -
-
+**page_number:** `typing.Optional[int]` -
-
+Specifies the page number to retrieve, enabling pagination. -**when:** `typing.Optional[When]` — Specify whether to include jobs created before or after a given `timestamp_ms`. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -82,11 +54,11 @@ Include only jobs of this status in the response. There are four possible status
-**timestamp_ms:** `typing.Optional[int]` +**page_size:** `typing.Optional[int]` -Provide a timestamp in milliseconds to filter jobs. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -94,15 +66,7 @@ When combined with the `when` parameter, you can filter jobs before or after the
-**sort_by:** `typing.Optional[SortBy]` - -Specify which timestamp to sort the jobs by. - -- `created`: Sort jobs by the time of creation, indicated by `created_timestamp_ms`. - -- `started`: Sort jobs by the time processing started, indicated by `started_timestamp_ms`. - -- `ended`: Sort jobs by the time processing ended, indicated by `ended_timestamp_ms`. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false.
@@ -110,13 +74,7 @@ Specify which timestamp to sort the jobs by.
-**direction:** `typing.Optional[Direction]` - -Specify the order in which to sort the jobs. Defaults to descending order. - -- `asc`: Sort in ascending order (chronological, with the oldest records first). - -- `desc`: Sort in descending order (reverse-chronological, with the newest records first). +**name:** `typing.Optional[str]` — Filter to only include tools with this name.
@@ -136,24 +94,10 @@ Specify the order in which to sort the jobs. Defaults to descending order.
-
client.expression_measurement.batch.start_inference_job(...) -
-
- -#### 📝 Description - -
-
- +
client.empathic_voice.tools.create_tool(...)
-Start a new measurement inference job. -
-
-
-
- #### 🔌 Usage
@@ -168,9 +112,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.start_inference_job( - urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"], - notify=True, +client.empathic_voice.tools.create_tool( + name="get_current_weather", + parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', + version_description="Fetches current weather and uses celsius or fahrenheit based on location of user.", + description="This tool is for getting the current weather.", + fallback_content="Unable to fetch current weather.", ) ``` @@ -187,19 +134,7 @@ client.expression_measurement.batch.start_inference_job(
-**models:** `typing.Optional[Models]` - -Specify the models to use for inference. - -If this field is not explicitly set, then all models will run by default. - -
-
- -
-
- -**transcription:** `typing.Optional[Transcription]` +**name:** `str` — Name applied to all versions of a particular Tool.
@@ -207,11 +142,11 @@ If this field is not explicitly set, then all models will run by default.
-**urls:** `typing.Optional[typing.Sequence[str]]` +**parameters:** `str` -URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. +Stringified JSON defining the parameters used by this version of the Tool. -If you wish to supply more than 100 URLs, consider providing them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). +These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
@@ -219,7 +154,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**text:** `typing.Optional[typing.Sequence[str]]` — Text supplied directly to our Emotional Language and NER models for analysis. +**version_description:** `typing.Optional[str]` — An optional description of the Tool version.
@@ -227,7 +162,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**callback_url:** `typing.Optional[str]` — If provided, a `POST` request will be made to the URL with the generated predictions on completion or the error message on failure. +**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function.
@@ -235,7 +170,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**notify:** `typing.Optional[bool]` — Whether to send an email notification to the user upon job completion/failure. +**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors.
@@ -255,24 +190,10 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-
client.expression_measurement.batch.get_job_details(...) -
-
- -#### 📝 Description - -
-
- +
client.empathic_voice.tools.list_tool_versions(...)
-Get the request details and state of a given job. -
-
-
-
- #### 🔌 Usage
@@ -287,8 +208,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.get_job_details( - id="job_id", +client.empathic_voice.tools.list_tool_versions( + id="00183a3f-79ba-413d-9f3b-609864268bea", ) ``` @@ -305,7 +226,7 @@ client.expression_measurement.batch.get_job_details(
-**id:** `str` — The unique identifier for the job. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -313,35 +234,53 @@ client.expression_measurement.batch.get_job_details(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
+**page_number:** `typing.Optional[int]` +Specifies the page number to retrieve, enabling pagination. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + -
-
client.expression_measurement.batch.get_job_predictions(...)
-#### 📝 Description +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. + +
+
+
-Get the JSON predictions of a completed inference job. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+ + +
+ +
client.empathic_voice.tools.create_tool_version(...) +
+
#### 🔌 Usage @@ -357,8 +296,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.get_job_predictions( - id="job_id", +client.empathic_voice.tools.create_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', + version_description="Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", + fallback_content="Unable to fetch current weather.", + description="This tool is for getting the current weather.", ) ``` @@ -375,7 +318,7 @@ client.expression_measurement.batch.get_job_predictions(
-**id:** `str` — The unique identifier for the job. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -383,36 +326,58 @@ client.expression_measurement.batch.get_job_predictions(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**parameters:** `str` + +Stringified JSON defining the parameters used by this version of the Tool. + +These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
-
-
+
+
+**version_description:** `typing.Optional[str]` — An optional description of the Tool version. +
-
-
client.expression_measurement.batch.get_job_artifacts(...)
-#### 📝 Description +**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. + +
+
+**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. + +
+
+
-Get the artifacts ZIP of a completed inference job. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +
+ + + +
+ +
client.empathic_voice.tools.delete_tool(...) +
+
+ #### 🔌 Usage
@@ -427,8 +392,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.get_job_artifacts( - id="string", +client.empathic_voice.tools.delete_tool( + id="00183a3f-79ba-413d-9f3b-609864268bea", ) ``` @@ -445,7 +410,7 @@ client.expression_measurement.batch.get_job_artifacts(
-**id:** `str` — The unique identifier for the job. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -465,11 +430,11 @@ client.expression_measurement.batch.get_job_artifacts(
-
client.expression_measurement.batch.start_inference_job_from_local_file(...) +
client.empathic_voice.tools.update_tool_name(...)
-#### 📝 Description +#### 🔌 Usage
@@ -477,27 +442,16 @@ client.expression_measurement.batch.get_job_artifacts(
-Start a new batch inference job. -
-
-
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from hume import HumeClient +```python +from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.start_inference_job_from_local_file() +client.empathic_voice.tools.update_tool_name( + id="00183a3f-79ba-413d-9f3b-609864268bea", + name="get_current_temperature", +) ```
@@ -513,9 +467,7 @@ client.expression_measurement.batch.start_inference_job_from_local_file()
-**file:** `from __future__ import annotations - -typing.List[core.File]` — See core.File for more documentation +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -523,7 +475,7 @@ typing.List[core.File]` — See core.File for more documentation
-**json:** `typing.Optional[InferenceBaseRequest]` — Stringified JSON object containing the inference job configuration. +**name:** `str` — Name applied to all versions of a particular Tool.
@@ -543,8 +495,7 @@ typing.List[core.File]` — See core.File for more documentation
-## EmpathicVoice Tools -
client.empathic_voice.tools.list_tools(...) +
client.empathic_voice.tools.get_tool_version(...)
@@ -562,15 +513,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.tools.list_tools( - page_number=0, - page_size=2, +client.empathic_voice.tools.get_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ```
@@ -586,11 +532,7 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -598,27 +540,13 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
+**version:** `int` -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. - -
-
+Version number for a Tool. -
-
+Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -**name:** `typing.Optional[str]` — Filter to only include tools with this name. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -638,7 +566,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool(...) +
client.empathic_voice.tools.delete_tool_version(...)
@@ -656,12 +584,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.create_tool( - name="get_current_weather", - parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', - version_description="Fetches current weather and uses celsius or fahrenheit based on location of user.", - description="This tool is for getting the current weather.", - fallback_content="Unable to fetch current weather.", +client.empathic_voice.tools.delete_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, ) ``` @@ -678,19 +603,7 @@ client.empathic_voice.tools.create_tool(
-**name:** `str` — Name applied to all versions of a particular Tool. - -
-
- -
-
- -**parameters:** `str` - -Stringified JSON defining the parameters used by this version of the Tool. - -These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -698,23 +611,13 @@ These parameters define the inputs needed for the Tool’s execution, including
-**version_description:** `typing.Optional[str]` — An optional description of the Tool version. - -
-
- -
-
+**version:** `int` -**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. - -
-
+Version number for a Tool. -
-
+Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -734,7 +637,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-
client.empathic_voice.tools.list_tool_versions(...) +
client.empathic_voice.tools.update_tool_description(...)
@@ -752,8 +655,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.list_tool_versions( +client.empathic_voice.tools.update_tool_description( id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, + version_description="Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.", ) ``` @@ -778,23 +683,13 @@ client.empathic_voice.tools.list_tool_versions(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
+**version:** `int` -**page_size:** `typing.Optional[int]` +Version number for a Tool. -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -802,7 +697,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. +**version_description:** `typing.Optional[str]` — An optional description of the Tool version.
@@ -822,7 +717,8 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool_version(...) +## EmpathicVoice Prompts +
client.empathic_voice.prompts.list_prompts(...)
@@ -840,13 +736,15 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.create_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', - version_description="Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", - fallback_content="Unable to fetch current weather.", - description="This tool is for getting the current weather.", +response = client.empathic_voice.prompts.list_prompts( + page_number=0, + page_size=2, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ```
@@ -862,19 +760,11 @@ client.empathic_voice.tools.create_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**parameters:** `str` +**page_number:** `typing.Optional[int]` -Stringified JSON defining the parameters used by this version of the Tool. +Specifies the page number to retrieve, enabling pagination. -These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -882,7 +772,11 @@ These parameters define the inputs needed for the Tool’s execution, including
-**version_description:** `typing.Optional[str]` — An optional description of the Tool version. +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -890,7 +784,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false.
@@ -898,7 +792,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. +**name:** `typing.Optional[str]` — Filter to only include prompts with this name.
@@ -918,7 +812,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-
client.empathic_voice.tools.delete_tool(...) +
client.empathic_voice.prompts.create_prompt(...)
@@ -936,8 +830,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.delete_tool( - id="00183a3f-79ba-413d-9f3b-609864268bea", +client.empathic_voice.prompts.create_prompt( + name="Weather Assistant Prompt", + text="You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", ) ``` @@ -954,7 +849,29 @@ client.empathic_voice.tools.delete_tool(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**name:** `str` — Name applied to all versions of a particular Prompt. + +
+
+ +
+
+ +**text:** `str` + +Instructions used to shape EVI’s behavior, responses, and style. + +You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. + +For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Prompt version.
@@ -974,7 +891,7 @@ client.empathic_voice.tools.delete_tool(
-
client.empathic_voice.tools.update_tool_name(...) +
client.empathic_voice.prompts.list_prompt_versions(...)
@@ -992,9 +909,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.update_tool_name( - id="00183a3f-79ba-413d-9f3b-609864268bea", - name="get_current_temperature", +client.empathic_voice.prompts.list_prompt_versions( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", ) ``` @@ -1011,7 +927,7 @@ client.empathic_voice.tools.update_tool_name(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1019,7 +935,31 @@ client.empathic_voice.tools.update_tool_name(
-**name:** `str` — Name applied to all versions of a particular Tool. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false.
@@ -1039,7 +979,7 @@ client.empathic_voice.tools.update_tool_name(
-
client.empathic_voice.tools.get_tool_version(...) +
client.empathic_voice.prompts.create_prompt_verison(...)
@@ -1057,9 +997,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.get_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, +client.empathic_voice.prompts.create_prompt_verison( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + text="You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", + version_description="This is an updated version of the Weather Assistant Prompt.", ) ``` @@ -1076,7 +1017,7 @@ client.empathic_voice.tools.get_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1084,13 +1025,21 @@ client.empathic_voice.tools.get_tool_version(
-**version:** `int` +**text:** `str` -Version number for a Tool. +Instructions used to shape EVI’s behavior, responses, and style for this version of the Prompt. -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Prompt version.
@@ -1110,7 +1059,7 @@ Version numbers are integer values representing different iterations of the Tool
-
client.empathic_voice.tools.delete_tool_version(...) +
client.empathic_voice.prompts.delete_prompt(...)
@@ -1128,9 +1077,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.delete_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, +client.empathic_voice.prompts.delete_prompt( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", ) ``` @@ -1147,21 +1095,7 @@ client.empathic_voice.tools.delete_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Tool. - -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1181,7 +1115,7 @@ Version numbers are integer values representing different iterations of the Tool
-
client.empathic_voice.tools.update_tool_description(...) +
client.empathic_voice.prompts.update_prompt_name(...)
@@ -1199,10 +1133,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.update_tool_description( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, - version_description="Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.", +client.empathic_voice.prompts.update_prompt_name( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + name="Updated Weather Assistant Prompt Name", ) ``` @@ -1219,21 +1152,7 @@ client.empathic_voice.tools.update_tool_description(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Tool. - -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1241,7 +1160,7 @@ Version numbers are integer values representing different iterations of the Tool
-**version_description:** `typing.Optional[str]` — An optional description of the Tool version. +**name:** `str` — Name applied to all versions of a particular Prompt.
@@ -1261,8 +1180,7 @@ Version numbers are integer values representing different iterations of the Tool
-## EmpathicVoice Prompts -
client.empathic_voice.prompts.list_prompts(...) +
client.empathic_voice.prompts.get_prompt_version(...)
@@ -1280,15 +1198,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.prompts.list_prompts( - page_number=0, - page_size=2, +client.empathic_voice.prompts.get_prompt_version( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ```
@@ -1304,11 +1217,21 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` +**id:** `str` — Identifier for a Prompt. Formatted as a UUID. + +
+
-Specifies the page number to retrieve, enabling pagination. +
+
-This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**version:** `int` + +Version number for a Prompt. + +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1316,19 +1239,56 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+ +
-Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - +
+
client.empathic_voice.prompts.delete_prompt_version(...)
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false. +#### 🔌 Usage + +
+
+ +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.prompts.delete_prompt_version( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=1, +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1336,7 +1296,13 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**name:** `typing.Optional[str]` — Filter to only include prompts with this name. +**version:** `int` + +Version number for a Prompt. + +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1356,7 +1322,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt(...) +
client.empathic_voice.prompts.update_prompt_description(...)
@@ -1374,9 +1340,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.create_prompt( - name="Weather Assistant Prompt", - text="You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", +client.empathic_voice.prompts.update_prompt_description( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=1, + version_description="This is an updated version_description.", ) ``` @@ -1393,7 +1360,7 @@ client.empathic_voice.prompts.create_prompt(
-**name:** `str` — Name applied to all versions of a particular Prompt. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1401,13 +1368,13 @@ client.empathic_voice.prompts.create_prompt(
-**text:** `str` +**version:** `int` -Instructions used to shape EVI’s behavior, responses, and style. +Version number for a Prompt. -You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. -For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1435,7 +1402,8 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-
client.empathic_voice.prompts.list_prompt_versions(...) +## EmpathicVoice CustomVoices +
client.empathic_voice.custom_voices.get_return_custom_voices_for_user(...)
@@ -1453,9 +1421,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.list_prompt_versions( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", -) +client.empathic_voice.custom_voices.get_return_custom_voices_for_user() ```
@@ -1471,14 +1437,6 @@ client.empathic_voice.prompts.list_prompt_versions(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- **page_number:** `typing.Optional[int]` Specifies the page number to retrieve, enabling pagination. @@ -1503,7 +1461,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false. +**name:** `typing.Optional[str]` — Filter to only include custom voices with this name.
@@ -1523,7 +1481,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt_verison(...) +
client.empathic_voice.custom_voices.create_new_custom_voice(...)
@@ -1541,10 +1499,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.create_prompt_verison( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - text="You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", - version_description="This is an updated version of the Weather Assistant Prompt.", +client.empathic_voice.custom_voices.create_new_custom_voice( + name="name", + base_voice="ITO", ) ``` @@ -1561,7 +1518,7 @@ client.empathic_voice.prompts.create_prompt_verison(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE")
@@ -1569,13 +1526,7 @@ client.empathic_voice.prompts.create_prompt_verison(
-**text:** `str` - -Instructions used to shape EVI’s behavior, responses, and style for this version of the Prompt. - -You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. - -For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice.
@@ -1583,7 +1534,11 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-**version_description:** `typing.Optional[str]` — An optional description of the Prompt version. +**parameters:** `typing.Optional[PostedCustomVoiceParameters]` + +The specified attributes of a Custom Voice. + +If no parameters are specified then all attributes will be set to their defaults, meaning no modifications will be made to the base voice.
@@ -1603,7 +1558,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-
client.empathic_voice.prompts.delete_prompt(...) +
client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id(...)
@@ -1621,8 +1576,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.delete_prompt( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", +client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id( + id="id", ) ``` @@ -1639,7 +1594,7 @@ client.empathic_voice.prompts.delete_prompt(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -1659,7 +1614,7 @@ client.empathic_voice.prompts.delete_prompt(
-
client.empathic_voice.prompts.update_prompt_name(...) +
client.empathic_voice.custom_voices.add_new_custom_voice_version(...)
@@ -1677,9 +1632,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.update_prompt_name( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - name="Updated Weather Assistant Prompt Name", +client.empathic_voice.custom_voices.add_new_custom_voice_version( + id="id", + name="name", + base_voice="ITO", ) ``` @@ -1696,7 +1652,7 @@ client.empathic_voice.prompts.update_prompt_name(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -1704,7 +1660,27 @@ client.empathic_voice.prompts.update_prompt_name(
-**name:** `str` — Name applied to all versions of a particular Prompt. +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + +
+
+ +
+
+ +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. + +
+
+ +
+
+**parameters:** `typing.Optional[PostedCustomVoiceParameters]` + +The specified attributes of a Custom Voice. + +If no parameters are specified then all attributes will be set to their defaults, meaning no modifications will be made to the base voice.
@@ -1724,7 +1700,7 @@ client.empathic_voice.prompts.update_prompt_name(
-
client.empathic_voice.prompts.get_prompt_version(...) +
client.empathic_voice.custom_voices.delete_custom_voice(...)
@@ -1742,9 +1718,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.get_prompt_version( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, +client.empathic_voice.custom_voices.delete_custom_voice( + id="id", ) ``` @@ -1761,21 +1736,7 @@ client.empathic_voice.prompts.get_prompt_version(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -1795,7 +1756,7 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathic_voice.prompts.delete_prompt_version(...) +
client.empathic_voice.custom_voices.update_custom_voice_name(...)
@@ -1813,9 +1774,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.delete_prompt_version( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=1, +client.empathic_voice.custom_voices.update_custom_voice_name( + id="string", + name="string", ) ``` @@ -1832,7 +1793,7 @@ client.empathic_voice.prompts.delete_prompt_version(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -1840,13 +1801,7 @@ client.empathic_voice.prompts.delete_prompt_version(
-**version:** `int` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE")
@@ -1866,7 +1821,8 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathic_voice.prompts.update_prompt_description(...) +## EmpathicVoice Configs +
client.empathic_voice.configs.list_configs(...)
@@ -1884,10 +1840,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.update_prompt_description( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=1, - version_description="This is an updated version_description.", +client.empathic_voice.configs.list_configs( + page_number=0, + page_size=1, ) ``` @@ -1904,7 +1859,11 @@ client.empathic_voice.prompts.update_prompt_description(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -1912,13 +1871,19 @@ client.empathic_voice.prompts.update_prompt_description(
-**version:** `int` +**page_size:** `typing.Optional[int]` -Version number for a Prompt. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
-Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false.
@@ -1926,7 +1891,7 @@ Version numbers are integer values representing different iterations of the Prom
-**version_description:** `typing.Optional[str]` — An optional description of the Prompt version. +**name:** `typing.Optional[str]` — Filter to only include configs with this name.
@@ -1946,8 +1911,7 @@ Version numbers are integer values representing different iterations of the Prom
-## EmpathicVoice CustomVoices -
client.empathic_voice.custom_voices.get_return_custom_voices_for_user(...) +
client.empathic_voice.configs.create_config(...)
@@ -1961,11 +1925,47 @@ Version numbers are integer values representing different iterations of the Prom ```python from hume import HumeClient +from hume.empathic_voice import ( + PostedConfigPromptSpec, + PostedEventMessageSpec, + PostedEventMessageSpecs, + PostedLanguageModel, + PostedVoice, +) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.get_return_custom_voices_for_user() +client.empathic_voice.configs.create_config( + name="Weather Assistant Config", + prompt=PostedConfigPromptSpec( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, + ), + evi_version="2", + voice=PostedVoice( + name="SAMPLE VOICE", + ), + language_model=PostedLanguageModel( + model_provider="ANTHROPIC", + model_resource="claude-3-5-sonnet-20240620", + temperature=1.0, + ), + event_messages=PostedEventMessageSpecs( + on_new_chat=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_inactivity_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_max_duration_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + ), +) ```
@@ -1981,11 +1981,7 @@ client.empathic_voice.custom_voices.get_return_custom_voices_for_user()
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**evi_version:** `str` — Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2).
@@ -1993,11 +1989,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +**name:** `str` — Name applied to all versions of a particular Config.
@@ -2005,7 +1997,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**name:** `typing.Optional[str]` — Filter to only include custom voices with this name. +**version_description:** `typing.Optional[str]` — An optional description of the Config version.
@@ -2013,64 +2005,27 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**prompt:** `typing.Optional[PostedConfigPromptSpec]`
- -
- - - - -
- -
client.empathic_voice.custom_voices.create_new_custom_voice(...) -
-
- -#### 🔌 Usage - -
-
-```python -from hume import HumeClient - -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.custom_voices.create_new_custom_voice( - name="name", - base_voice="ITO", -) - -``` -
-
+**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config. +
-#### ⚙️ Parameters - -
-
-
-**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") - -
-
+**language_model:** `typing.Optional[PostedLanguageModel]` -
-
+The supplemental language model associated with this Config. -**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. +This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI.
@@ -2078,11 +2033,11 @@ client.empathic_voice.custom_voices.create_new_custom_voice(
-**parameters:** `typing.Optional[PostedCustomVoiceParameters]` +**ellm_model:** `typing.Optional[PostedEllmModel]` -The specified attributes of a Custom Voice. +The eLLM setup associated with this Config. -If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. +Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody.
@@ -2090,55 +2045,31 @@ If no parameters are specified then all attributes will be set to their defaults
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config.
-
-
- - -
-
-
-
client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id(...)
-#### 🔌 Usage - -
-
+**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config. + +
+
-```python -from hume import HumeClient - -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id( - id="id", -) - -``` -
-
+**event_messages:** `typing.Optional[PostedEventMessageSpecs]` +
-#### ⚙️ Parameters -
-
-
- -**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**timeouts:** `typing.Optional[PostedTimeoutSpecs]`
@@ -2158,7 +2089,7 @@ client.empathic_voice.custom_voices.get_return_custom_voice_by_custom_voice_id(
-
client.empathic_voice.custom_voices.add_new_custom_voice_version(...) +
client.empathic_voice.configs.list_config_versions(...)
@@ -2176,10 +2107,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.add_new_custom_voice_version( - id="id", - name="name", - base_voice="ITO", +client.empathic_voice.configs.list_config_versions( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) ``` @@ -2196,7 +2125,7 @@ client.empathic_voice.custom_voices.add_new_custom_voice_version(
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2204,7 +2133,11 @@ client.empathic_voice.custom_voices.add_new_custom_voice_version(
-**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -2212,7 +2145,11 @@ client.empathic_voice.custom_voices.add_new_custom_voice_version(
-**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -2220,11 +2157,7 @@ client.empathic_voice.custom_voices.add_new_custom_voice_version(
-**parameters:** `typing.Optional[PostedCustomVoiceParameters]` - -The specified attributes of a Custom Voice. - -If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false.
@@ -2244,7 +2177,7 @@ If no parameters are specified then all attributes will be set to their defaults
-
client.empathic_voice.custom_voices.delete_custom_voice(...) +
client.empathic_voice.configs.create_config_version(...)
@@ -2258,15 +2191,54 @@ If no parameters are specified then all attributes will be set to their defaults ```python from hume import HumeClient +from hume.empathic_voice import ( + PostedConfigPromptSpec, + PostedEllmModel, + PostedEventMessageSpec, + PostedEventMessageSpecs, + PostedLanguageModel, + PostedVoice, +) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.delete_custom_voice( - id="id", -) - -``` +client.empathic_voice.configs.create_config_version( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version_description="This is an updated version of the Weather Assistant Config.", + evi_version="2", + prompt=PostedConfigPromptSpec( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, + ), + voice=PostedVoice( + name="ITO", + ), + language_model=PostedLanguageModel( + model_provider="ANTHROPIC", + model_resource="claude-3-5-sonnet-20240620", + temperature=1.0, + ), + ellm_model=PostedEllmModel( + allow_short_responses=True, + ), + event_messages=PostedEventMessageSpecs( + on_new_chat=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_inactivity_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_max_duration_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + ), +) + +```
@@ -2280,7 +2252,7 @@ client.empathic_voice.custom_voices.delete_custom_voice(
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2288,56 +2260,79 @@ client.empathic_voice.custom_voices.delete_custom_voice(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**evi_version:** `str` — The version of the EVI used with this config.
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Config version. +
+
+
+**prompt:** `typing.Optional[PostedConfigPromptSpec]` +
-
-
client.empathic_voice.custom_voices.update_custom_voice_name(...)
-#### 🔌 Usage +**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config version. + +
+
+**language_model:** `typing.Optional[PostedLanguageModel]` + +The supplemental language model associated with this Config version. + +This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. + +
+
+
-```python -from hume import HumeClient +**ellm_model:** `typing.Optional[PostedEllmModel]` -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.custom_voices.update_custom_voice_name( - id="string", - name="string", -) +The eLLM setup associated with this Config version. -``` +Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. +
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config version. +
-#### ⚙️ Parameters -
+**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config version. + +
+
+
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**event_messages:** `typing.Optional[PostedEventMessageSpecs]`
@@ -2345,7 +2340,7 @@ client.empathic_voice.custom_voices.update_custom_voice_name(
-**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") +**timeouts:** `typing.Optional[PostedTimeoutSpecs]`
@@ -2365,8 +2360,7 @@ client.empathic_voice.custom_voices.update_custom_voice_name(
-## EmpathicVoice Configs -
client.empathic_voice.configs.list_configs(...) +
client.empathic_voice.configs.delete_config(...)
@@ -2384,9 +2378,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.list_configs( - page_number=0, - page_size=1, +client.empathic_voice.configs.delete_config( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) ``` @@ -2403,39 +2396,7 @@ client.empathic_voice.configs.list_configs(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false. - -
-
- -
-
- -**name:** `typing.Optional[str]` — Filter to only include configs with this name. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2455,7 +2416,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config(...) +
client.empathic_voice.configs.update_config_name(...)
@@ -2469,46 +2430,13 @@ For example, if `page_size` is set to 10, each page will include up to 10 items. ```python from hume import HumeClient -from hume.empathic_voice import ( - PostedConfigPromptSpec, - PostedEventMessageSpec, - PostedEventMessageSpecs, - PostedLanguageModel, - PostedVoice, -) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.create_config( - name="Weather Assistant Config", - prompt=PostedConfigPromptSpec( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, - ), - evi_version="2", - voice=PostedVoice( - name="SAMPLE VOICE", - ), - language_model=PostedLanguageModel( - model_provider="ANTHROPIC", - model_resource="claude-3-5-sonnet-20240620", - temperature=1.0, - ), - event_messages=PostedEventMessageSpecs( - on_new_chat=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_inactivity_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_max_duration_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - ), +client.empathic_voice.configs.update_config_name( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + name="Updated Weather Assistant Config Name", ) ``` @@ -2525,7 +2453,7 @@ client.empathic_voice.configs.create_config(
-**evi_version:** `str` — Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2541,71 +2469,56 @@ client.empathic_voice.configs.create_config(
-**version_description:** `typing.Optional[str]` — An optional description of the Config version. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**prompt:** `typing.Optional[PostedConfigPromptSpec]` -
-
-
-**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config. -
+
+
client.empathic_voice.configs.get_config_version(...)
-**language_model:** `typing.Optional[PostedLanguageModel]` - -The supplemental language model associated with this Config. +#### 🔌 Usage -This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. - -
-
+
+
-**ellm_model:** `typing.Optional[PostedEllmModel]` +```python +from hume import HumeClient -The eLLM setup associated with this Config. +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.configs.get_config_version( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, +) -Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. - +```
- -
-
- -**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config. -
+#### ⚙️ Parameters +
-**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config. - -
-
-
-**event_messages:** `typing.Optional[PostedEventMessageSpecs]` +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2613,7 +2526,13 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**timeouts:** `typing.Optional[PostedTimeoutSpecs]` +**version:** `int` + +Version number for a Config. + +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -2633,7 +2552,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-
client.empathic_voice.configs.list_config_versions(...) +
client.empathic_voice.configs.delete_config_version(...)
@@ -2651,8 +2570,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.list_config_versions( +client.empathic_voice.configs.delete_config_version( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, ) ``` @@ -2677,31 +2597,13 @@ client.empathic_voice.configs.list_config_versions(
-**page_number:** `typing.Optional[int]` +**version:** `int` -Specifies the page number to retrieve, enabling pagination. +Version number for a Config. -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false. +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -2721,7 +2623,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config_version(...) +
client.empathic_voice.configs.update_config_description(...)
@@ -2735,51 +2637,14 @@ For example, if `page_size` is set to 10, each page will include up to 10 items. ```python from hume import HumeClient -from hume.empathic_voice import ( - PostedConfigPromptSpec, - PostedEllmModel, - PostedEventMessageSpec, - PostedEventMessageSpecs, - PostedLanguageModel, - PostedVoice, -) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.create_config_version( +client.empathic_voice.configs.update_config_description( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version_description="This is an updated version of the Weather Assistant Config.", - evi_version="2", - prompt=PostedConfigPromptSpec( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, - ), - voice=PostedVoice( - name="ITO", - ), - language_model=PostedLanguageModel( - model_provider="ANTHROPIC", - model_resource="claude-3-5-sonnet-20240620", - temperature=1.0, - ), - ellm_model=PostedEllmModel( - allow_short_responses=True, - ), - event_messages=PostedEventMessageSpecs( - on_new_chat=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_inactivity_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_max_duration_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - ), + version=1, + version_description="This is an updated version_description.", ) ``` @@ -2804,7 +2669,13 @@ client.empathic_voice.configs.create_config_version(
-**evi_version:** `str` — The version of the EVI used with this config. +**version:** `int` + +Version number for a Config. + +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -2820,55 +2691,67 @@ client.empathic_voice.configs.create_config_version(
-**prompt:** `typing.Optional[PostedConfigPromptSpec]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config version. -
+
+## EmpathicVoice Chats +
client.empathic_voice.chats.list_chats(...)
-**language_model:** `typing.Optional[PostedLanguageModel]` - -The supplemental language model associated with this Config version. +#### 🔌 Usage -This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. - -
-
+
+
-**ellm_model:** `typing.Optional[PostedEllmModel]` +```python +from hume import HumeClient -The eLLM setup associated with this Config version. +client = HumeClient( + api_key="YOUR_API_KEY", +) +response = client.empathic_voice.chats.list_chats( + page_number=0, + page_size=1, + ascending_order=True, +) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page -Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. - +``` +
+
+#### ⚙️ Parameters +
-**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config version. - -
-
-
-**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config version. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -2876,7 +2759,11 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**event_messages:** `typing.Optional[PostedEventMessageSpecs]` +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -2884,7 +2771,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**timeouts:** `typing.Optional[PostedTimeoutSpecs]` +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -2904,7 +2791,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-
client.empathic_voice.configs.delete_config(...) +
client.empathic_voice.chats.list_chat_events(...)
@@ -2922,9 +2809,17 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.delete_config( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", +response = client.empathic_voice.chats.list_chat_events( + id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", + page_number=0, + page_size=3, + ascending_order=True, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ```
@@ -2940,7 +2835,7 @@ client.empathic_voice.configs.delete_config(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**id:** `str` — Identifier for a Chat. Formatted as a UUID.
@@ -2948,56 +2843,23 @@ client.empathic_voice.configs.delete_config(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
- -
+**page_size:** `typing.Optional[int]` +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + -
- -
client.empathic_voice.configs.update_config_name(...) -
-
- -#### 🔌 Usage - -
-
-```python -from hume import HumeClient - -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.configs.update_config_name( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - name="Updated Weather Assistant Config Name", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
+**page_number:** `typing.Optional[int]` -
-
+Specifies the page number to retrieve, enabling pagination. -**id:** `str` — Identifier for a Config. Formatted as a UUID. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3005,7 +2867,7 @@ client.empathic_voice.configs.update_config_name(
-**name:** `str` — Name applied to all versions of a particular Config. +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3025,7 +2887,8 @@ client.empathic_voice.configs.update_config_name(
-
client.empathic_voice.configs.get_config_version(...) +## EmpathicVoice ChatGroups +
client.empathic_voice.chat_groups.list_chat_groups(...)
@@ -3043,9 +2906,11 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.get_config_version( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, +client.empathic_voice.chat_groups.list_chat_groups( + page_number=0, + page_size=1, + ascending_order=True, + config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) ``` @@ -3062,7 +2927,11 @@ client.empathic_voice.configs.get_config_version(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3070,13 +2939,31 @@ client.empathic_voice.configs.get_config_version(
-**version:** `int` +**page_size:** `typing.Optional[int]` -Version number for a Config. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. + +
+
+ +
+
+ +**config_id:** `typing.Optional[str]` + +The unique identifier for an EVI configuration. + +Filter Chat Groups to only include Chats that used this `config_id` in their most recent Chat.
@@ -3096,7 +2983,7 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathic_voice.configs.delete_config_version(...) +
client.empathic_voice.chat_groups.list_chat_group_events(...)
@@ -3114,9 +3001,11 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.delete_config_version( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, +client.empathic_voice.chat_groups.list_chat_group_events( + id="697056f0-6c7e-487d-9bd8-9c19df79f05f", + page_number=0, + page_size=3, + ascending_order=True, ) ``` @@ -3133,7 +3022,7 @@ client.empathic_voice.configs.delete_config_version(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**id:** `str` — Identifier for a Chat Group. Formatted as a UUID.
@@ -3141,13 +3030,31 @@ client.empathic_voice.configs.delete_config_version(
-**version:** `int` +**page_size:** `typing.Optional[int]` -Version number for a Config. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +
+
+ +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3167,10 +3074,25 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathic_voice.configs.update_config_description(...) +## ExpressionMeasurement Batch +
client.expression_measurement.batch.list_jobs(...) +
+
+ +#### 📝 Description +
+
+
+ +Sort and filter jobs. +
+
+
+
+ #### 🔌 Usage
@@ -3185,11 +3107,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.update_config_description( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, - version_description="This is an updated version_description.", -) +client.expression_measurement.batch.list_jobs() ```
@@ -3205,7 +3123,7 @@ client.empathic_voice.configs.update_config_description(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**limit:** `typing.Optional[int]` — The maximum number of jobs to include in the response.
@@ -3213,13 +3131,17 @@ client.empathic_voice.configs.update_config_description(
-**version:** `int` +**status:** `typing.Optional[typing.Union[Status, typing.Sequence[Status]]]` -Version number for a Config. +Include only jobs of this status in the response. There are four possible statuses: -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +- `QUEUED`: The job has been received and is waiting to be processed. -Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +- `IN_PROGRESS`: The job is currently being processed. + +- `COMPLETED`: The job has finished processing. + +- `FAILED`: The job encountered an error and could not be completed successfully.
@@ -3227,7 +3149,49 @@ Version numbers are integer values representing different iterations of the Conf
-**version_description:** `typing.Optional[str]` — An optional description of the Config version. +**when:** `typing.Optional[When]` — Specify whether to include jobs created before or after a given `timestamp_ms`. + +
+
+ +
+
+ +**timestamp_ms:** `typing.Optional[int]` + +Provide a timestamp in milliseconds to filter jobs. + +When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. + +
+
+ +
+
+ +**sort_by:** `typing.Optional[SortBy]` + +Specify which timestamp to sort the jobs by. + +- `created`: Sort jobs by the time of creation, indicated by `created_timestamp_ms`. + +- `started`: Sort jobs by the time processing started, indicated by `started_timestamp_ms`. + +- `ended`: Sort jobs by the time processing ended, indicated by `ended_timestamp_ms`. + +
+
+ +
+
+ +**direction:** `typing.Optional[Direction]` + +Specify the order in which to sort the jobs. Defaults to descending order. + +- `asc`: Sort in ascending order (chronological, with the oldest records first). + +- `desc`: Sort in descending order (reverse-chronological, with the newest records first).
@@ -3247,11 +3211,24 @@ Version numbers are integer values representing different iterations of the Conf
-## EmpathicVoice Chats -
client.empathic_voice.chats.list_chats(...) +
client.expression_measurement.batch.start_inference_job(...)
+#### 📝 Description + +
+
+ +
+
+ +Start a new measurement inference job. +
+
+
+
+ #### 🔌 Usage
@@ -3266,16 +3243,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.chats.list_chats( - page_number=0, - page_size=1, - ascending_order=True, +client.expression_measurement.batch.start_inference_job( + urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"], + notify=True, ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ```
@@ -3291,11 +3262,11 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` +**models:** `typing.Optional[Models]` -Specifies the page number to retrieve, enabling pagination. +Specify the models to use for inference. -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +If this field is not explicitly set, then all models will run by default.
@@ -3303,11 +3274,19 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` +**transcription:** `typing.Optional[Transcription]` + +
+
-Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +
+
-For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +**urls:** `typing.Optional[typing.Sequence[str]]` + +URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. + +If you wish to supply more than 100 URLs, consider providing them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`).
@@ -3315,7 +3294,23 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**text:** `typing.Optional[typing.Sequence[str]]` — Text supplied directly to our Emotional Language and NER models for analysis. + +
+
+ +
+
+ +**callback_url:** `typing.Optional[str]` — If provided, a `POST` request will be made to the URL with the generated predictions on completion or the error message on failure. + +
+
+ +
+
+ +**notify:** `typing.Optional[bool]` — Whether to send an email notification to the user upon job completion/failure.
@@ -3335,10 +3330,24 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chats.list_chat_events(...) +
client.expression_measurement.batch.get_job_details(...) +
+
+ +#### 📝 Description + +
+
+
+Get the request details and state of a given job. +
+
+
+
+ #### 🔌 Usage
@@ -3353,17 +3362,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.chats.list_chat_events( - id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", - page_number=0, - page_size=3, - ascending_order=True, +client.expression_measurement.batch.get_job_details( + id="job_id", ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ```
@@ -3379,7 +3380,7 @@ for page in response.iter_pages():
-**id:** `str` — Identifier for a Chat. Formatted as a UUID. +**id:** `str` — The unique identifier for the job.
@@ -3387,54 +3388,35 @@ for page in response.iter_pages():
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. -
+
+
client.expression_measurement.batch.get_job_predictions(...)
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. - -
-
+#### 📝 Description
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
+
+
+ +Get the JSON predictions of a completed inference job.
- - -
- -## EmpathicVoice ChatGroups -
client.empathic_voice.chat_groups.list_chat_groups(...) -
-
#### 🔌 Usage @@ -3450,11 +3432,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.list_chat_groups( - page_number=0, - page_size=1, - ascending_order=True, - config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", +client.expression_measurement.batch.get_job_predictions( + id="job_id", ) ``` @@ -3471,43 +3450,77 @@ client.empathic_voice.chat_groups.list_chat_groups(
-**page_number:** `typing.Optional[int]` +**id:** `str` — The unique identifier for the job. + +
+
-Specifies the page number to retrieve, enabling pagination. +
+
-This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+ + + +
+ +
client.expression_measurement.batch.get_job_artifacts(...)
-**page_size:** `typing.Optional[int]` +#### 📝 Description -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +
+
-For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - +
+
+ +Get the artifacts ZIP of a completed inference job. +
+
+#### 🔌 Usage +
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. - +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.expression_measurement.batch.get_job_artifacts( + id="string", +) + +```
+
+
+ +#### ⚙️ Parameters
-**config_id:** `typing.Optional[str]` - -The unique identifier for an EVI configuration. +
+
-Filter Chat Groups to only include Chats that used this `config_id` in their most recent Chat. +**id:** `str` — The unique identifier for the job.
@@ -3527,10 +3540,24 @@ Filter Chat Groups to only include Chats that used this `config_id` in their mos
-
client.empathic_voice.chat_groups.list_chat_group_events(...) +
client.expression_measurement.batch.start_inference_job_from_local_file(...) +
+
+ +#### 📝 Description + +
+
+
+Start a new batch inference job. +
+
+
+
+ #### 🔌 Usage
@@ -3545,12 +3572,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.list_chat_group_events( - id="697056f0-6c7e-487d-9bd8-9c19df79f05f", - page_number=0, - page_size=3, - ascending_order=True, -) +client.expression_measurement.batch.start_inference_job_from_local_file() ```
@@ -3566,31 +3588,9 @@ client.empathic_voice.chat_groups.list_chat_group_events(
-**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. +**file:** `from __future__ import annotations -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +typing.List[core.File]` — See core.File for more documentation
@@ -3598,7 +3598,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**json:** `typing.Optional[InferenceBaseRequest]` — Stringified JSON object containing the inference job configuration.
diff --git a/src/hume/base_client.py b/src/hume/base_client.py index d293f18b..889ec357 100644 --- a/src/hume/base_client.py +++ b/src/hume/base_client.py @@ -4,11 +4,11 @@ from .environment import HumeClientEnvironment import httpx from .core.client_wrapper import SyncClientWrapper -from .expression_measurement.client import ExpressionMeasurementClient from .empathic_voice.client import EmpathicVoiceClient +from .expression_measurement.client import ExpressionMeasurementClient from .core.client_wrapper import AsyncClientWrapper -from .expression_measurement.client import AsyncExpressionMeasurementClient from .empathic_voice.client import AsyncEmpathicVoiceClient +from .expression_measurement.client import AsyncExpressionMeasurementClient class BaseHumeClient: @@ -69,8 +69,8 @@ def __init__( else httpx.Client(timeout=_defaulted_timeout), timeout=_defaulted_timeout, ) - self.expression_measurement = ExpressionMeasurementClient(client_wrapper=self._client_wrapper) self.empathic_voice = EmpathicVoiceClient(client_wrapper=self._client_wrapper) + self.expression_measurement = ExpressionMeasurementClient(client_wrapper=self._client_wrapper) class AsyncBaseHumeClient: @@ -131,8 +131,8 @@ def __init__( else httpx.AsyncClient(timeout=_defaulted_timeout), timeout=_defaulted_timeout, ) - self.expression_measurement = AsyncExpressionMeasurementClient(client_wrapper=self._client_wrapper) self.empathic_voice = AsyncEmpathicVoiceClient(client_wrapper=self._client_wrapper) + self.expression_measurement = AsyncExpressionMeasurementClient(client_wrapper=self._client_wrapper) def _get_base_url(*, base_url: typing.Optional[str] = None, environment: HumeClientEnvironment) -> str: