diff --git a/.mock/definition/empathic-voice/__package__.yml b/.mock/definition/empathic-voice/__package__.yml index a967a3cd..a7b9c506 100644 --- a/.mock/definition/empathic-voice/__package__.yml +++ b/.mock/definition/empathic-voice/__package__.yml @@ -648,6 +648,62 @@ types: docs: The provider of the supplemental language model. source: openapi: stenographer-openapi.json + PostedLanguageModelModelResource: + enum: + - value: claude-3-5-sonnet-latest + name: Claude35SonnetLatest + - value: claude-3-5-sonnet-20240620 + name: Claude35Sonnet20240620 + - value: claude-3-opus-20240229 + name: Claude3Opus20240229 + - value: claude-3-sonnet-20240229 + name: Claude3Sonnet20240229 + - value: claude-3-haiku-20240307 + name: Claude3Haiku20240307 + - value: claude-2.1 + name: Claude21 + - value: claude-instant-1.2 + name: ClaudeInstant12 + - value: gemini-1.5-pro + name: Gemini15Pro + - value: gemini-1.5-flash + name: Gemini15Flash + - value: gemini-1.5-pro-002 + name: Gemini15Pro002 + - value: gemini-1.5-flash-002 + name: Gemini15Flash002 + - value: gpt-4-turbo-preview + name: Gpt4TurboPreview + - value: gpt-3.5-turbo-0125 + name: Gpt35Turbo0125 + - value: gpt-3.5-turbo + name: Gpt35Turbo + - value: gpt-4o + name: Gpt4O + - value: gpt-4o-mini + name: Gpt4OMini + - value: gemma-7b-it + name: Gemma7BIt + - value: llama3-8b-8192 + name: Llama38B8192 + - value: llama3-70b-8192 + name: Llama370B8192 + - value: llama-3.1-70b-versatile + name: Llama3170BVersatile + - value: llama-3.1-8b-instant + name: Llama318BInstant + - value: accounts/fireworks/models/mixtral-8x7b-instruct + name: AccountsFireworksModelsMixtral8X7BInstruct + - value: accounts/fireworks/models/llama-v3p1-405b-instruct + name: AccountsFireworksModelsLlamaV3P1405BInstruct + - value: accounts/fireworks/models/llama-v3p1-70b-instruct + name: AccountsFireworksModelsLlamaV3P170BInstruct + - value: accounts/fireworks/models/llama-v3p1-8b-instruct + name: AccountsFireworksModelsLlamaV3P18BInstruct + - ellm + docs: 
String that specifies the language model to use with `model_provider`. + source: + openapi: stenographer-openapi.json PostedLanguageModel: docs: A LanguageModel to be posted to the server properties: @@ -655,7 +711,7 @@ types: type: optional docs: The provider of the supplemental language model. model_resource: - type: optional + type: optional docs: String that specifies the language model to use with `model_provider`. temperature: type: optional @@ -784,14 +840,23 @@ types: of the Tool. Each update to the Tool increments its version number. source: openapi: stenographer-openapi.json + PostedVoiceProvider: + enum: + - HUME_AI + - CUSTOM_VOICE + docs: >- + The provider of the voice to use. Supported values are `HUME_AI` and + `CUSTOM_VOICE`. + source: + openapi: stenographer-openapi.json PostedVoice: docs: A Voice specification posted to the server properties: provider: - type: literal<"HUME_AI"> + type: PostedVoiceProvider docs: >- - The provider of the voice to use. Currently, only `HUME_AI` is - supported as the voice provider. + The provider of the voice to use. Supported values are `HUME_AI` and + `CUSTOM_VOICE`. name: type: optional docs: >- @@ -999,6 +1064,62 @@ types: docs: The provider of the supplemental language model. 
source: openapi: stenographer-openapi.json + ReturnLanguageModelModelResource: + enum: + - value: claude-3-5-sonnet-latest + name: Claude35SonnetLatest + - value: claude-3-5-sonnet-20240620 + name: Claude35Sonnet20240620 + - value: claude-3-opus-20240229 + name: Claude3Opus20240229 + - value: claude-3-sonnet-20240229 + name: Claude3Sonnet20240229 + - value: claude-3-haiku-20240307 + name: Claude3Haiku20240307 + - value: claude-2.1 + name: Claude21 + - value: claude-instant-1.2 + name: ClaudeInstant12 + - value: gemini-1.5-pro + name: Gemini15Pro + - value: gemini-1.5-flash + name: Gemini15Flash + - value: gemini-1.5-pro-002 + name: Gemini15Pro002 + - value: gemini-1.5-flash-002 + name: Gemini15Flash002 + - value: gpt-4-turbo-preview + name: Gpt4TurboPreview + - value: gpt-3.5-turbo-0125 + name: Gpt35Turbo0125 + - value: gpt-3.5-turbo + name: Gpt35Turbo + - value: gpt-4o + name: Gpt4O + - value: gpt-4o-mini + name: Gpt4OMini + - value: gemma-7b-it + name: Gemma7BIt + - value: llama3-8b-8192 + name: Llama38B8192 + - value: llama3-70b-8192 + name: Llama370B8192 + - value: llama-3.1-70b-versatile + name: Llama3170BVersatile + - value: llama-3.1-8b-instant + name: Llama318BInstant + - value: accounts/fireworks/models/mixtral-8x7b-instruct + name: AccountsFireworksModelsMixtral8X7BInstruct + - value: accounts/fireworks/models/llama-v3p1-405b-instruct + name: AccountsFireworksModelsLlamaV3P1405BInstruct + - value: accounts/fireworks/models/llama-v3p1-70b-instruct + name: AccountsFireworksModelsLlamaV3P170BInstruct + - value: accounts/fireworks/models/llama-v3p1-8b-instruct + name: AccountsFireworksModelsLlamaV3P18BInstruct + - ellm + docs: String that specifies the language model to use with `model_provider`. + source: + openapi: stenographer-openapi.json ReturnLanguageModel: docs: A specific LanguageModel properties: @@ -1006,7 +1127,7 @@ types: type: optional docs: The provider of the supplemental language model. 
model_resource: - type: optional + type: optional docs: String that specifies the language model to use with `model_provider`. temperature: type: optional @@ -1071,14 +1192,23 @@ types: seconds. source: openapi: stenographer-openapi.json + ReturnVoiceProvider: + enum: + - HUME_AI + - CUSTOM_VOICE + docs: >- + The provider of the voice to use. Supported values are `HUME_AI` and + `CUSTOM_VOICE`. + source: + openapi: stenographer-openapi.json ReturnVoice: docs: A specific voice specification properties: provider: - type: literal<"HUME_AI"> + type: ReturnVoiceProvider docs: >- - The provider of the voice to use. Currently, only `HUME_AI` is - supported as the voice provider. + The provider of the voice to use. Supported values are `HUME_AI` and + `CUSTOM_VOICE`. name: type: optional docs: >- @@ -1632,6 +1762,83 @@ types: config: optional source: openapi: stenographer-openapi.json + ReturnChatAudioReconstructionStatus: + enum: + - QUEUED + - IN_PROGRESS + - COMPLETE + - ERROR + - CANCELLED + docs: >- + Indicates the current state of the audio reconstruction job. There are + five possible statuses: + + + - `QUEUED`: The reconstruction job is waiting to be processed. + + + - `IN_PROGRESS`: The reconstruction is currently being processed. + + + - `COMPLETE`: The audio reconstruction is finished and ready for download. + + + - `ERROR`: An error occurred during the reconstruction process. + + + - `CANCELED`: The reconstruction job has been canceled. + source: + openapi: stenographer-openapi.json + ReturnChatAudioReconstruction: + docs: >- + List of chat audio reconstructions returned for the specified page number + and page size. + properties: + id: + type: string + docs: Identifier for the chat. Formatted as a UUID. + user_id: + type: string + docs: Identifier for the user that owns this chat. Formatted as a UUID. + status: + type: ReturnChatAudioReconstructionStatus + docs: >- + Indicates the current state of the audio reconstruction job. 
There are + five possible statuses: + + + - `QUEUED`: The reconstruction job is waiting to be processed. + + + - `IN_PROGRESS`: The reconstruction is currently being processed. + + + - `COMPLETE`: The audio reconstruction is finished and ready for + download. + + + - `ERROR`: An error occurred during the reconstruction process. + + + - `CANCELED`: The reconstruction job has been canceled. + filename: + type: optional + docs: Name of the chat audio reconstruction file. + modified_at: + type: optional + docs: >- + The timestamp of the most recent status change for this audio + reconstruction, formatted milliseconds since the Unix epoch. + signed_audio_url: + type: optional + docs: Signed URL used to download the chat audio reconstruction file. + signed_url_expiration_timestamp_millis: + type: optional + docs: >- + The timestamp when the signed URL will expire, formatted as a Unix + epoch milliseconds. + source: + openapi: stenographer-openapi.json ReturnActiveChatCount: docs: A description of current chat chat sessions for a user properties: @@ -1891,6 +2098,70 @@ types: type: list source: openapi: stenographer-openapi.json + ReturnChatGroupPagedAudioReconstructionsPaginationDirection: + enum: + - ASC + - DESC + docs: >- + Indicates the order in which the paginated results are presented, based on + their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest records + first) or `DESC` for descending order (reverse-chronological, with the + newest records first). This value corresponds to the `ascending_order` + query parameter used in the request. + source: + openapi: stenographer-openapi.json + ReturnChatGroupPagedAudioReconstructions: + docs: A paginated list of chat reconstructions for a particular chatgroup + properties: + id: + type: string + docs: Identifier for the chat group. Formatted as a UUID. + user_id: + type: string + docs: Identifier for the user that owns this chat. Formatted as a UUID. 
+ num_chats: + type: integer + docs: Total number of chats in this chatgroup + page_number: + type: integer + docs: >- + The page number of the returned list. + + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + pagination_direction: + type: ReturnChatGroupPagedAudioReconstructionsPaginationDirection + docs: >- + Indicates the order in which the paginated results are presented, + based on their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest + records first) or `DESC` for descending order (reverse-chronological, + with the newest records first). This value corresponds to the + `ascending_order` query parameter used in the request. + audio_reconstructions_page: + docs: >- + List of chat audio reconstructions returned for the specified page + number and page size. + type: list + source: + openapi: stenographer-openapi.json PostedPromptSpec: docs: A Prompt associated with this Config. properties: diff --git a/.mock/definition/empathic-voice/chatGroups.yml b/.mock/definition/empathic-voice/chatGroups.yml index 81454a0e..7d3a5e9f 100644 --- a/.mock/definition/empathic-voice/chatGroups.yml +++ b/.mock/definition/empathic-voice/chatGroups.yml @@ -522,76 +522,5 @@ service: 0.022247314453125, "Tiredness": 0.0194549560546875, "Triumph": 0.04107666015625} metadata: '' - get-audio: - path: /v0/evi/chat_groups/{id}/audio - method: GET - auth: true - path-parameters: - id: - type: string - docs: Identifier for a chat. Formatted as a UUID. 
- display-name: Get chat group audio - request: - name: ChatGroupsGetAudioRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Boolean to indicate if the results should be paginated in - chronological order or reverse-chronological order. Defaults to - true. - response: - docs: Success - type: root.ReturnChatPagedEvents - errors: - - root.BadRequestError - examples: - - path-parameters: - id: id - response: - body: - id: id - chat_group_id: chat_group_id - status: ACTIVE - start_timestamp: 1000000 - end_timestamp: 1000000 - pagination_direction: ASC - events_page: - - id: id - chat_id: chat_id - timestamp: 1000000 - role: USER - type: SYSTEM_PROMPT - message_text: message_text - emotion_features: emotion_features - metadata: metadata - metadata: metadata - page_number: 1 - page_size: 1 - total_pages: 1 - config: - id: id - version: 1 source: openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/chats.yml b/.mock/definition/empathic-voice/chats.yml index 800d80f6..ebda1a77 100644 --- a/.mock/definition/empathic-voice/chats.yml +++ b/.mock/definition/empathic-voice/chats.yml @@ -455,46 +455,5 @@ service: config: id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 version: 0 - get-audio: - path: /v0/evi/chats/{id}/audio - method: GET - auth: true - path-parameters: 
- id: - type: string - docs: Identifier for a chat. Formatted as a UUID. - display-name: Get chat audio - response: - docs: Success - type: root.ReturnChatPagedEvents - errors: - - root.BadRequestError - examples: - - path-parameters: - id: id - response: - body: - id: id - chat_group_id: chat_group_id - status: ACTIVE - start_timestamp: 1000000 - end_timestamp: 1000000 - pagination_direction: ASC - events_page: - - id: id - chat_id: chat_id - timestamp: 1000000 - role: USER - type: SYSTEM_PROMPT - message_text: message_text - emotion_features: emotion_features - metadata: metadata - metadata: metadata - page_number: 1 - page_size: 1 - total_pages: 1 - config: - id: id - version: 1 source: openapi: stenographer-openapi.json diff --git a/poetry.lock b/poetry.lock index 4231d894..8afaa9c2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -238,21 +238,20 @@ lxml = ["lxml"] [[package]] name = "bleach" -version = "6.1.0" +version = "6.2.0" description = "An easy safelist-based HTML-sanitizing tool." 
optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, - {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, + {file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"}, + {file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"}, ] [package.dependencies] -six = ">=1.9.0" webencodings = "*" [package.extras] -css = ["tinycss2 (>=1.1.0,<1.3)"] +css = ["tinycss2 (>=1.1.0,<1.5)"] [[package]] name = "certifi" @@ -2582,23 +2581,23 @@ win32 = ["pywin32"] [[package]] name = "setuptools" -version = "75.2.0" +version = "75.3.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = true python-versions = ">=3.8" files = [ - {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"}, - {file = "setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"}, + {file = "setuptools-75.3.0-py3-none-any.whl", hash = "sha256:f2504966861356aa38616760c0f66568e535562374995367b4e69c7143cf6bcd"}, + {file = "setuptools-75.3.0.tar.gz", hash = "sha256:fba5dd4d766e97be1b1681d98712680ae8f2f26d7881245f2ce9e40714f1a686"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", 
"jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.12.*)", "pytest-mypy"] [[package]] name = "simpleaudio" diff --git a/pyproject.toml b/pyproject.toml index eb7b5bec..9357d1a0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "hume" -version = "0.7.3" +version = "0.7.4" description = "A Python SDK for Hume AI" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 
d84b76a9..253b9fc3 100644 --- a/reference.md +++ b/reference.md @@ -1,6 +1,6 @@ # Reference -## ExpressionMeasurement Batch -
client.expression_measurement.batch.list_jobs(...) +## EmpathicVoice Tools +
client.empathic_voice.tools.list_tools(...)
@@ -12,7 +12,9 @@
-Sort and filter jobs. +Fetches a paginated list of **Tools**. + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -32,7 +34,15 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.list_jobs() +response = client.empathic_voice.tools.list_tools( + page_number=0, + page_size=2, +) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ``` @@ -48,33 +58,11 @@ client.expression_measurement.batch.list_jobs()
-**limit:** `typing.Optional[int]` — The maximum number of jobs to include in the response. - -
-
- -
-
- -**status:** `typing.Optional[typing.Union[Status, typing.Sequence[Status]]]` - -Include only jobs of this status in the response. There are four possible statuses: - -- `QUEUED`: The job has been received and is waiting to be processed. - -- `IN_PROGRESS`: The job is currently being processed. - -- `COMPLETED`: The job has finished processing. - -- `FAILED`: The job encountered an error and could not be completed successfully. - -
-
+**page_number:** `typing.Optional[int]` -
-
+Specifies the page number to retrieve, enabling pagination. -**when:** `typing.Optional[When]` — Specify whether to include jobs created before or after a given `timestamp_ms`. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -82,11 +70,11 @@ Include only jobs of this status in the response. There are four possible status
-**timestamp_ms:** `typing.Optional[int]` +**page_size:** `typing.Optional[int]` -Provide a timestamp in milliseconds to filter jobs. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -94,15 +82,7 @@ When combined with the `when` parameter, you can filter jobs before or after the
-**sort_by:** `typing.Optional[SortBy]` - -Specify which timestamp to sort the jobs by. - -- `created`: Sort jobs by the time of creation, indicated by `created_timestamp_ms`. - -- `started`: Sort jobs by the time processing started, indicated by `started_timestamp_ms`. - -- `ended`: Sort jobs by the time processing ended, indicated by `ended_timestamp_ms`. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false.
@@ -110,13 +90,7 @@ Specify which timestamp to sort the jobs by.
-**direction:** `typing.Optional[Direction]` - -Specify the order in which to sort the jobs. Defaults to descending order. - -- `asc`: Sort in ascending order (chronological, with the oldest records first). - -- `desc`: Sort in descending order (reverse-chronological, with the newest records first). +**name:** `typing.Optional[str]` — Filter to only include tools with name.
@@ -136,7 +110,7 @@ Specify the order in which to sort the jobs. Defaults to descending order.
-
client.expression_measurement.batch.start_inference_job(...) +
client.empathic_voice.tools.create_tool(...)
@@ -148,7 +122,9 @@ Specify the order in which to sort the jobs. Defaults to descending order.
-Start a new measurement inference job. +Creates a **Tool** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -168,9 +144,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.start_inference_job( - urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"], - notify=True, +client.empathic_voice.tools.create_tool( + name="get_current_weather", + parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', + version_description="Fetches current weather and uses celsius or fahrenheit based on location of user.", + description="This tool is for getting the current weather.", + fallback_content="Unable to fetch current weather.", ) ``` @@ -187,19 +166,7 @@ client.expression_measurement.batch.start_inference_job(
-**models:** `typing.Optional[Models]` - -Specify the models to use for inference. - -If this field is not explicitly set, then all models will run by default. - -
-
- -
-
- -**transcription:** `typing.Optional[Transcription]` +**name:** `str` — Name applied to all versions of a particular Tool.
@@ -207,11 +174,11 @@ If this field is not explicitly set, then all models will run by default.
-**urls:** `typing.Optional[typing.Sequence[str]]` +**parameters:** `str` -URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. +Stringified JSON defining the parameters used by this version of the Tool. -If you wish to supply more than 100 URLs, consider providing them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). +These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
@@ -219,7 +186,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**text:** `typing.Optional[typing.Sequence[str]]` — Text supplied directly to our Emotional Language and NER models for analysis. +**version_description:** `typing.Optional[str]` — An optional description of the Tool version.
@@ -227,7 +194,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**callback_url:** `typing.Optional[str]` — If provided, a `POST` request will be made to the URL with the generated predictions on completion or the error message on failure. +**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function.
@@ -235,7 +202,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**notify:** `typing.Optional[bool]` — Whether to send an email notification to the user upon job completion/failure. +**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors.
@@ -255,7 +222,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-
client.expression_measurement.batch.get_job_details(...) +
client.empathic_voice.tools.list_tool_versions(...)
@@ -267,7 +234,9 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-Get the request details and state of a given job. +Fetches a list of a **Tool's** versions. + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -287,8 +256,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.get_job_details( - id="job_id", +client.empathic_voice.tools.list_tool_versions( + id="00183a3f-79ba-413d-9f3b-609864268bea", ) ``` @@ -305,7 +274,39 @@ client.expression_measurement.batch.get_job_details(
-**id:** `str` — The unique identifier for the job. +**id:** `str` — Identifier for a Tool. Formatted as a UUID. + +
+
+ +
+
+ +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false.
@@ -325,7 +326,7 @@ client.expression_measurement.batch.get_job_details(
-
client.expression_measurement.batch.get_job_predictions(...) +
client.empathic_voice.tools.create_tool_version(...)
@@ -337,7 +338,9 @@ client.expression_measurement.batch.get_job_details(
-Get the JSON predictions of a completed inference job. +Updates a **Tool** by creating a new version of the **Tool**. + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -357,8 +360,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.get_job_predictions( - id="job_id", +client.empathic_voice.tools.create_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', + version_description="Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", + fallback_content="Unable to fetch current weather.", + description="This tool is for getting the current weather.", ) ``` @@ -375,7 +382,43 @@ client.expression_measurement.batch.get_job_predictions(
-**id:** `str` — The unique identifier for the job. +**id:** `str` — Identifier for a Tool. Formatted as a UUID. + +
+
+ +
+
+ +**parameters:** `str` + +Stringified JSON defining the parameters used by this version of the Tool. + +These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Tool version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. + +
+
+ +
+
+ +**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors.
@@ -395,7 +438,7 @@ client.expression_measurement.batch.get_job_predictions(
-
client.expression_measurement.batch.start_inference_job_from_local_file(...) +
client.empathic_voice.tools.delete_tool(...)
@@ -407,7 +450,9 @@ client.expression_measurement.batch.get_job_predictions(
-Start a new batch inference job. +Deletes a **Tool** and its versions. + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -427,7 +472,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.start_inference_job_from_local_file() +client.empathic_voice.tools.delete_tool( + id="00183a3f-79ba-413d-9f3b-609864268bea", +) ``` @@ -443,17 +490,7 @@ client.expression_measurement.batch.start_inference_job_from_local_file()
-**file:** `from __future__ import annotations - -typing.List[core.File]` — See core.File for more documentation - -
-
- -
-
- -**json:** `typing.Optional[InferenceBaseRequest]` — Stringified JSON object containing the inference job configuration. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -473,8 +510,7 @@ typing.List[core.File]` — See core.File for more documentation
-## EmpathicVoice Tools -
client.empathic_voice.tools.list_tools(...) +
client.empathic_voice.tools.update_tool_name(...)
@@ -486,7 +522,7 @@ typing.List[core.File]` — See core.File for more documentation
-Fetches a paginated list of **Tools**. +Updates the name of a **Tool**. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -508,15 +544,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.tools.list_tools( - page_number=0, - page_size=2, +client.empathic_voice.tools.update_tool_name( + id="00183a3f-79ba-413d-9f3b-609864268bea", + name="get_current_temperature", ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ```
@@ -532,39 +563,15 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` +**id:** `str` — Identifier for a Tool. Formatted as a UUID. + +
+
-Specifies the page number to retrieve, enabling pagination. +
+
-This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. - -
-
- -
-
- -**name:** `typing.Optional[str]` — Filter to only include tools with name. +**name:** `str` — Name applied to all versions of a particular Tool.
@@ -584,7 +591,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool(...) +
client.empathic_voice.tools.get_tool_version(...)
@@ -596,7 +603,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Tool** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). +Fetches a specified version of a **Tool**. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -618,12 +625,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.create_tool( - name="get_current_weather", - parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', - version_description="Fetches current weather and uses celsius or fahrenheit based on location of user.", - description="This tool is for getting the current weather.", - fallback_content="Unable to fetch current weather.", +client.empathic_voice.tools.get_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, ) ``` @@ -640,19 +644,7 @@ client.empathic_voice.tools.create_tool(
-**name:** `str` — Name applied to all versions of a particular Tool. - -
-
- -
-
- -**parameters:** `str` - -Stringified JSON defining the parameters used by this version of the Tool. - -These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -660,23 +652,13 @@ These parameters define the inputs needed for the Tool’s execution, including
-**version_description:** `typing.Optional[str]` — An optional description of the Tool version. - -
-
- -
-
+**version:** `int` -**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. - -
-
+Version number for a Tool. -
-
+Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -696,7 +678,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-
client.empathic_voice.tools.list_tool_versions(...) +
client.empathic_voice.tools.delete_tool_version(...)
@@ -708,7 +690,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-Fetches a list of a **Tool's** versions. +Deletes a specified version of a **Tool**. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -730,8 +712,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.list_tool_versions( +client.empathic_voice.tools.delete_tool_version( id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, ) ``` @@ -756,31 +739,13 @@ client.empathic_voice.tools.list_tool_versions(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +**version:** `int` -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
+Version number for a Tool. -
-
+Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -800,7 +765,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool_version(...) +
client.empathic_voice.tools.update_tool_description(...)
@@ -812,7 +777,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Updates a **Tool** by creating a new version of the **Tool**. +Updates the description of a specified **Tool** version. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -834,12 +799,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.create_tool_version( +client.empathic_voice.tools.update_tool_description( id="00183a3f-79ba-413d-9f3b-609864268bea", - parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', - version_description="Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", - fallback_content="Unable to fetch current weather.", - description="This tool is for getting the current weather.", + version=1, + version_description="Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.", ) ``` @@ -864,27 +827,13 @@ client.empathic_voice.tools.create_tool_version(
-**parameters:** `str` - -Stringified JSON defining the parameters used by this version of the Tool. - -These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. - -
-
- -
-
+**version:** `int` -**version_description:** `typing.Optional[str]` — An optional description of the Tool version. - -
-
+Version number for a Tool. -
-
+Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -892,7 +841,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. +**version_description:** `typing.Optional[str]` — An optional description of the Tool version.
@@ -912,7 +861,8 @@ These parameters define the inputs needed for the Tool’s execution, including
-
client.empathic_voice.tools.delete_tool(...) +## EmpathicVoice Prompts +
client.empathic_voice.prompts.list_prompts(...)
@@ -924,9 +874,9 @@ These parameters define the inputs needed for the Tool’s execution, including
-Deletes a **Tool** and its versions. +Fetches a paginated list of **Prompts**. -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/prompting) for tips on crafting your system prompt.
@@ -946,9 +896,15 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.delete_tool( - id="00183a3f-79ba-413d-9f3b-609864268bea", +response = client.empathic_voice.prompts.list_prompts( + page_number=0, + page_size=2, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ``` @@ -964,7 +920,39 @@ client.empathic_voice.tools.delete_tool(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — Only include the most recent version of each prompt in the list. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Filter to only include prompts with name.
@@ -984,7 +972,7 @@ client.empathic_voice.tools.delete_tool(
-
client.empathic_voice.tools.update_tool_name(...) +
client.empathic_voice.prompts.create_prompt(...)
@@ -996,9 +984,9 @@ client.empathic_voice.tools.delete_tool(
-Updates the name of a **Tool**. +Creates a **Prompt** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/prompting) for tips on crafting your system prompt.
@@ -1018,9 +1006,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.update_tool_name( - id="00183a3f-79ba-413d-9f3b-609864268bea", - name="get_current_temperature", +client.empathic_voice.prompts.create_prompt( + name="Weather Assistant Prompt", + text="You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", ) ``` @@ -1037,7 +1025,7 @@ client.empathic_voice.tools.update_tool_name(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**name:** `str` — Name applied to all versions of a particular Prompt.
@@ -1045,7 +1033,21 @@ client.empathic_voice.tools.update_tool_name(
-**name:** `str` — Name applied to all versions of a particular Tool. +**text:** `str` + +Instructions used to shape EVI’s behavior, responses, and style. + +You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. + +For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Prompt version.
@@ -1065,7 +1067,7 @@ client.empathic_voice.tools.update_tool_name(
-
client.empathic_voice.tools.get_tool_version(...) +
client.empathic_voice.prompts.list_prompt_versions(...)
@@ -1077,9 +1079,9 @@ client.empathic_voice.tools.update_tool_name(
-Fetches a specified version of a **Tool**. +Fetches a list of a **Prompt's** versions. -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/prompting) for tips on crafting your system prompt.
@@ -1099,9 +1101,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.get_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, +client.empathic_voice.prompts.list_prompt_versions( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", ) ``` @@ -1118,7 +1119,7 @@ client.empathic_voice.tools.get_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1126,13 +1127,11 @@ client.empathic_voice.tools.get_tool_version(
-**version:** `int` - -Version number for a Tool. +**page_number:** `typing.Optional[int]` -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +Specifies the page number to retrieve, enabling pagination. -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -1140,33 +1139,53 @@ Version numbers are integer values representing different iterations of the Tool
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
- -
+**page_size:** `typing.Optional[int]` +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + -
-
client.empathic_voice.tools.delete_tool_version(...)
-#### 📝 Description +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+ + + + + + +
+
client.empathic_voice.prompts.create_prompt_verison(...)
+#### 📝 Description +
-Deletes a specified version of a **Tool**. +
+
-Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +Updates a **Prompt** by creating a new version of the **Prompt**. + +See our [prompting guide](/docs/empathic-voice-interface-evi/prompting) for tips on crafting your system prompt.
@@ -1186,9 +1205,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.delete_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, +client.empathic_voice.prompts.create_prompt_verison( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + text="You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", + version_description="This is an updated version of the Weather Assistant Prompt.", ) ``` @@ -1205,7 +1225,7 @@ client.empathic_voice.tools.delete_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1213,13 +1233,21 @@ client.empathic_voice.tools.delete_tool_version(
-**version:** `int` +**text:** `str` -Version number for a Tool. +Instructions used to shape EVI’s behavior, responses, and style for this version of the Prompt. -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Prompt version.
@@ -1239,7 +1267,7 @@ Version numbers are integer values representing different iterations of the Tool
-
client.empathic_voice.tools.update_tool_description(...) +
client.empathic_voice.prompts.delete_prompt(...)
@@ -1251,9 +1279,9 @@ Version numbers are integer values representing different iterations of the Tool
-Updates the description of a specified **Tool** version. +Deletes a **Prompt** and its versions. -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/prompting) for tips on crafting your system prompt.
@@ -1273,10 +1301,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.update_tool_description( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, - version_description="Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.", +client.empathic_voice.prompts.delete_prompt( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", ) ``` @@ -1293,29 +1319,7 @@ client.empathic_voice.tools.update_tool_description(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Tool. - -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. - -
-
- -
-
- -**version_description:** `typing.Optional[str]` — An optional description of the Tool version. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1335,8 +1339,7 @@ Version numbers are integer values representing different iterations of the Tool
-## EmpathicVoice Prompts -
client.empathic_voice.prompts.list_prompts(...) +
client.empathic_voice.prompts.update_prompt_name(...)
@@ -1348,7 +1351,7 @@ Version numbers are integer values representing different iterations of the Tool
-Fetches a paginated list of **Prompts**. +Updates the name of a **Prompt**. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1370,15 +1373,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.prompts.list_prompts( - page_number=0, - page_size=2, +client.empathic_voice.prompts.update_prompt_name( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + name="Updated Weather Assistant Prompt Name", ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ```
@@ -1394,31 +1392,7 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**restrict_to_most_recent:** `typing.Optional[bool]` — Only include the most recent version of each prompt in the list. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1426,7 +1400,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**name:** `typing.Optional[str]` — Filter to only include prompts with name. +**name:** `str` — Name applied to all versions of a particular Prompt.
@@ -1446,7 +1420,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt(...) +
client.empathic_voice.prompts.get_prompt_version(...)
@@ -1458,7 +1432,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Prompt** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). +Fetches a specified version of a **Prompt**. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1480,9 +1454,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.create_prompt( - name="Weather Assistant Prompt", - text="You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", +client.empathic_voice.prompts.get_prompt_version( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, ) ``` @@ -1499,7 +1473,7 @@ client.empathic_voice.prompts.create_prompt(
-**name:** `str` — Name applied to all versions of a particular Prompt. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1507,21 +1481,13 @@ client.empathic_voice.prompts.create_prompt(
-**text:** `str` - -Instructions used to shape EVI’s behavior, responses, and style. - -You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. +**version:** `int` -For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). - -
-
+Version number for a Prompt. -
-
+Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. -**version_description:** `typing.Optional[str]` — An optional description of the Prompt version. +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1541,7 +1507,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-
client.empathic_voice.prompts.list_prompt_versions(...) +
client.empathic_voice.prompts.delete_prompt_version(...)
@@ -1553,7 +1519,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-Fetches a list of a **Prompt's** versions. +Deletes a specified version of a **Prompt**. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1575,8 +1541,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.list_prompt_versions( +client.empathic_voice.prompts.delete_prompt_version( id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=1, ) ``` @@ -1601,31 +1568,13 @@ client.empathic_voice.prompts.list_prompt_versions(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +**version:** `int` -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
+Version number for a Prompt. -
-
+Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false. +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1645,7 +1594,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt_verison(...) +
client.empathic_voice.prompts.update_prompt_description(...)
@@ -1657,7 +1606,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Updates a **Prompt** by creating a new version of the **Prompt**. +Updates the description of a **Prompt**. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1679,10 +1628,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.create_prompt_verison( +client.empathic_voice.prompts.update_prompt_description( id="af699d45-2985-42cc-91b9-af9e5da3bac5", - text="You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", - version_description="This is an updated version of the Weather Assistant Prompt.", + version=1, + version_description="This is an updated version_description.", ) ``` @@ -1707,13 +1656,13 @@ client.empathic_voice.prompts.create_prompt_verison(
-**text:** `str` +**version:** `int` -Instructions used to shape EVI’s behavior, responses, and style for this version of the Prompt. +Version number for a Prompt. -You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. -For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1741,7 +1690,8 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-
client.empathic_voice.prompts.delete_prompt(...) +## EmpathicVoice CustomVoices +
client.empathic_voice.custom_voices.list_custom_voices(...)
@@ -1753,9 +1703,9 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-Deletes a **Prompt** and its versions. +Fetches a paginated list of **Custom Voices**. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -1775,9 +1725,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.delete_prompt( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", -) +client.empathic_voice.custom_voices.list_custom_voices() ``` @@ -1793,7 +1741,31 @@ client.empathic_voice.prompts.delete_prompt(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Filter to only include custom voices with this name.
@@ -1813,7 +1785,7 @@ client.empathic_voice.prompts.delete_prompt(
-
client.empathic_voice.prompts.update_prompt_name(...) +
client.empathic_voice.custom_voices.create_custom_voice(...)
@@ -1825,9 +1797,9 @@ client.empathic_voice.prompts.delete_prompt(
-Updates the name of a **Prompt**. +Creates a **Custom Voice** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -1847,9 +1819,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.update_prompt_name( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - name="Updated Weather Assistant Prompt Name", +client.empathic_voice.custom_voices.create_custom_voice( + name="name", + base_voice="ITO", ) ``` @@ -1866,7 +1838,7 @@ client.empathic_voice.prompts.update_prompt_name(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE")
@@ -1874,7 +1846,19 @@ client.empathic_voice.prompts.update_prompt_name(
-**name:** `str` — Name applied to all versions of a particular Prompt. +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. + +
+
+ +
+
+ +**parameters:** `typing.Optional[PostedCustomVoiceParameters]` + +The specified attributes of a Custom Voice. + +If no parameters are specified then all attributes will be set to their defaults, meaning no modifications will be made to the base voice.
@@ -1894,7 +1878,7 @@ client.empathic_voice.prompts.update_prompt_name(
-
client.empathic_voice.prompts.get_prompt_version(...) +
client.empathic_voice.custom_voices.get_custom_voice(...)
@@ -1906,9 +1890,9 @@ client.empathic_voice.prompts.update_prompt_name(
-Fetches a specified version of a **Prompt**. +Fetches a specific **Custom Voice** by ID. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -1928,9 +1912,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.get_prompt_version( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, +client.empathic_voice.custom_voices.get_custom_voice( + id="id", ) ``` @@ -1947,21 +1930,7 @@ client.empathic_voice.prompts.get_prompt_version(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -1981,7 +1950,7 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathic_voice.prompts.delete_prompt_version(...) +
client.empathic_voice.custom_voices.create_custom_voice_version(...)
@@ -1993,9 +1962,9 @@ Version numbers are integer values representing different iterations of the Prom
-Deletes a specified version of a **Prompt**. +Updates a **Custom Voice** by creating a new version of the **Custom Voice**. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -2015,9 +1984,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.delete_prompt_version( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=1, +client.empathic_voice.custom_voices.create_custom_voice_version( + id="id", + name="name", + base_voice="ITO", ) ``` @@ -2034,7 +2004,7 @@ client.empathic_voice.prompts.delete_prompt_version(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -2042,13 +2012,27 @@ client.empathic_voice.prompts.delete_prompt_version(
-**version:** `int` +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + +
+
-Version number for a Prompt. +
+
-Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. + +
+
-Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +
+
+ +**parameters:** `typing.Optional[PostedCustomVoiceParameters]` + +The specified attributes of a Custom Voice. + +If no parameters are specified then all attributes will be set to their defaults, meaning no modifications will be made to the base voice.
@@ -2068,7 +2052,7 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathic_voice.prompts.update_prompt_description(...) +
client.empathic_voice.custom_voices.delete_custom_voice(...)
@@ -2080,9 +2064,9 @@ Version numbers are integer values representing different iterations of the Prom
-Updates the description of a **Prompt**. +Deletes a **Custom Voice** and its versions. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -2102,10 +2086,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.update_prompt_description( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=1, - version_description="This is an updated version_description.", +client.empathic_voice.custom_voices.delete_custom_voice( + id="id", ) ``` @@ -2122,29 +2104,7 @@ client.empathic_voice.prompts.update_prompt_description(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. - -
-
- -
-
- -**version_description:** `typing.Optional[str]` — An optional description of the Prompt version. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -2164,8 +2124,8 @@ Version numbers are integer values representing different iterations of the Prom
-## EmpathicVoice CustomVoices -
client.empathic_voice.custom_voices.list_custom_voices(...) +## EmpathicVoice Configs +
client.empathic_voice.configs.list_configs(...)
@@ -2177,9 +2137,9 @@ Version numbers are integer values representing different iterations of the Prom
-Fetches a paginated list of **Custom Voices**. +Fetches a paginated list of **Configs**. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2199,7 +2159,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.list_custom_voices() +client.empathic_voice.configs.list_configs( + page_number=0, + page_size=1, +) ``` @@ -2239,7 +2202,15 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**name:** `typing.Optional[str]` — Filter to only include custom voices with name. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Filter to only include configs with this name.
@@ -2259,7 +2230,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.custom_voices.create_custom_voice(...) +
client.empathic_voice.configs.create_config(...)
@@ -2271,9 +2242,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Custom Voice** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). +Creates a **Config** which can be applied to EVI. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2289,13 +2260,47 @@ Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for detai ```python from hume import HumeClient +from hume.empathic_voice import ( + PostedConfigPromptSpec, + PostedEventMessageSpec, + PostedEventMessageSpecs, + PostedLanguageModel, + PostedVoice, +) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.create_custom_voice( - name="name", - base_voice="ITO", +client.empathic_voice.configs.create_config( + name="Weather Assistant Config", + prompt=PostedConfigPromptSpec( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, + ), + evi_version="2", + voice=PostedVoice( + provider="HUME_AI", + name="SAMPLE VOICE", + ), + language_model=PostedLanguageModel( + model_provider="ANTHROPIC", + model_resource="claude-3-5-sonnet-20240620", + temperature=1.0, + ), + event_messages=PostedEventMessageSpecs( + on_new_chat=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_inactivity_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_max_duration_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + ), ) ``` @@ -2312,7 +2317,7 @@ client.empathic_voice.custom_voices.create_custom_voice(
-**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") +**evi_version:** `str` — Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2).
@@ -2320,7 +2325,7 @@ client.empathic_voice.custom_voices.create_custom_voice(
-**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. +**name:** `str` — Name applied to all versions of a particular Config.
@@ -2328,11 +2333,7 @@ client.empathic_voice.custom_voices.create_custom_voice(
-**parameters:** `typing.Optional[PostedCustomVoiceParameters]` - -The specified attributes of a Custom Voice. - -If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. +**version_description:** `typing.Optional[str]` — An optional description of the Config version.
@@ -2340,71 +2341,55 @@ If no parameters are specified then all attributes will be set to their defaults
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**prompt:** `typing.Optional[PostedConfigPromptSpec]`
- -
- - - - -
-
client.empathic_voice.custom_voices.get_custom_voice(...)
-#### 📝 Description +**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config. + +
+
-
-
+**language_model:** `typing.Optional[PostedLanguageModel]` -Fetches a specific **Custom Voice** by ID. +The supplemental language model associated with this Config. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. -
-
+This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. +
-#### 🔌 Usage - -
-
-
-```python -from hume import HumeClient +**ellm_model:** `typing.Optional[PostedEllmModel]` -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.custom_voices.get_custom_voice( - id="id", -) +The eLLM setup associated with this Config. -``` -
-
+Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. +
-#### ⚙️ Parameters -
+**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config. + +
+
+
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config.
@@ -2412,73 +2397,7 @@ client.empathic_voice.custom_voices.get_custom_voice(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
- - - - - - -
- -
client.empathic_voice.custom_voices.create_custom_voice_version(...) -
-
- -#### 📝 Description - -
-
- -
-
- -Updates a **Custom Voice** by creating a new version of the **Custom Voice**. - -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. -
-
-
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from hume import HumeClient - -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.custom_voices.create_custom_voice_version( - id="id", - name="name", - base_voice="ITO", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**event_messages:** `typing.Optional[PostedEventMessageSpecs]`
@@ -2486,27 +2405,7 @@ client.empathic_voice.custom_voices.create_custom_voice_version(
-**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") - -
-
- -
-
- -**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. - -
-
- -
-
- -**parameters:** `typing.Optional[PostedCustomVoiceParameters]` - -The specified attributes of a Custom Voice. - -If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. +**timeouts:** `typing.Optional[PostedTimeoutSpecs]`
@@ -2526,7 +2425,7 @@ If no parameters are specified then all attributes will be set to their defaults
-
client.empathic_voice.custom_voices.delete_custom_voice(...) +
client.empathic_voice.configs.list_config_versions(...)
@@ -2538,9 +2437,9 @@ If no parameters are specified then all attributes will be set to their defaults
-Deletes a **Custom Voice** and its versions. +Fetches a list of a **Config's** versions. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2560,8 +2459,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.delete_custom_voice( - id="id", +client.empathic_voice.configs.list_config_versions( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) ``` @@ -2578,76 +2477,10 @@ client.empathic_voice.custom_voices.delete_custom_voice(
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
- -
- - - - -
- -## EmpathicVoice Configs -
client.empathic_voice.configs.list_configs(...) -
-
- -#### 📝 Description - -
-
- -
-
- -Fetches a paginated list of **Configs**. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). -
-
-
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from hume import HumeClient - -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.configs.list_configs( - page_number=0, - page_size=1, -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
@@ -2676,15 +2509,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. - -
-
- -
-
- -**name:** `typing.Optional[str]` — Filter to only include configs with this name. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false.
@@ -2704,7 +2529,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config(...) +
client.empathic_voice.configs.create_config_version(...)
@@ -2716,7 +2541,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Config** which can be applied to EVI. +Updates a **Config** by creating a new version of the **Config**. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2736,6 +2561,7 @@ For more details on configuration options and how to configure EVI, see our [con from hume import HumeClient from hume.empathic_voice import ( PostedConfigPromptSpec, + PostedEllmModel, PostedEventMessageSpec, PostedEventMessageSpecs, PostedLanguageModel, @@ -2745,21 +2571,26 @@ from hume.empathic_voice import ( client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.create_config( - name="Weather Assistant Config", +client.empathic_voice.configs.create_config_version( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version_description="This is an updated version of the Weather Assistant Config.", + evi_version="2", prompt=PostedConfigPromptSpec( id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0, ), - evi_version="2", voice=PostedVoice( - name="SAMPLE VOICE", + provider="HUME_AI", + name="ITO", ), language_model=PostedLanguageModel( model_provider="ANTHROPIC", model_resource="claude-3-5-sonnet-20240620", temperature=1.0, ), + ellm_model=PostedEllmModel( + allow_short_responses=True, + ), event_messages=PostedEventMessageSpecs( on_new_chat=PostedEventMessageSpec( enabled=False, @@ -2790,7 +2621,7 @@ client.empathic_voice.configs.create_config(
-**evi_version:** `str` — Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2798,7 +2629,7 @@ client.empathic_voice.configs.create_config(
-**name:** `str` — Name applied to all versions of a particular Config. +**evi_version:** `str` — The version of the EVI used with this config.
@@ -2822,7 +2653,7 @@ client.empathic_voice.configs.create_config(
-**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config. +**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config version.
@@ -2832,7 +2663,7 @@ client.empathic_voice.configs.create_config( **language_model:** `typing.Optional[PostedLanguageModel]` -The supplemental language model associated with this Config. +The supplemental language model associated with this Config version. This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. @@ -2844,7 +2675,7 @@ This model is used to generate longer, more detailed responses from EVI. Choosin **ellm_model:** `typing.Optional[PostedEllmModel]` -The eLLM setup associated with this Config. +The eLLM setup associated with this Config version. Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. @@ -2854,7 +2685,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config. +**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config version.
@@ -2862,7 +2693,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config. +**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config version.
@@ -2898,7 +2729,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-
client.empathic_voice.configs.list_config_versions(...) +
client.empathic_voice.configs.delete_config(...)
@@ -2910,7 +2741,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-Fetches a list of a **Config's** versions. +Deletes a **Config** and its versions. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2932,7 +2763,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.list_config_versions( +client.empathic_voice.configs.delete_config( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) @@ -2958,23 +2789,72 @@ client.empathic_voice.configs.list_config_versions(
-**page_number:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
-Specifies the page number to retrieve, enabling pagination. -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - +
+
client.empathic_voice.configs.update_config_name(...)
-**page_size:** `typing.Optional[int]` +#### 📝 Description -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +
+
-For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +
+
+ +Updates the name of a **Config**. + +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.configs.update_config_name( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + name="Updated Weather Assistant Config Name", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2982,7 +2862,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false. +**name:** `str` — Name applied to all versions of a particular Config.
@@ -3002,7 +2882,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config_version(...) +
client.empathic_voice.configs.get_config_version(...)
@@ -3014,7 +2894,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Updates a **Config** by creating a new version of the **Config**. +Fetches a specified version of a **Config**. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3032,51 +2912,13 @@ For more details on configuration options and how to configure EVI, see our [con ```python from hume import HumeClient -from hume.empathic_voice import ( - PostedConfigPromptSpec, - PostedEllmModel, - PostedEventMessageSpec, - PostedEventMessageSpecs, - PostedLanguageModel, - PostedVoice, -) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.create_config_version( +client.empathic_voice.configs.get_config_version( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version_description="This is an updated version of the Weather Assistant Config.", - evi_version="2", - prompt=PostedConfigPromptSpec( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, - ), - voice=PostedVoice( - name="ITO", - ), - language_model=PostedLanguageModel( - model_provider="ANTHROPIC", - model_resource="claude-3-5-sonnet-20240620", - temperature=1.0, - ), - ellm_model=PostedEllmModel( - allow_short_responses=True, - ), - event_messages=PostedEventMessageSpecs( - on_new_chat=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_inactivity_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_max_duration_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - ), + version=1, ) ``` @@ -3101,7 +2943,13 @@ client.empathic_voice.configs.create_config_version(
-**evi_version:** `str` — The version of the EVI used with this config. +**version:** `int` + +Version number for a Config. + +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -3109,71 +2957,72 @@ client.empathic_voice.configs.create_config_version(
-**version_description:** `typing.Optional[str]` — An optional description of the Config version. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**prompt:** `typing.Optional[PostedConfigPromptSpec]` -
+
+
client.empathic_voice.configs.delete_config_version(...)
-**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config version. - -
-
+#### 📝 Description
-**language_model:** `typing.Optional[PostedLanguageModel]` +
+
-The supplemental language model associated with this Config version. +Deletes a specified version of a **Config**. -This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. - +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +
+
+#### 🔌 Usage +
-**ellm_model:** `typing.Optional[PostedEllmModel]` +
+
-The eLLM setup associated with this Config version. +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.configs.delete_config_version( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, +) -Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. - +```
- -
-
- -**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config version. -
+#### ⚙️ Parameters +
-**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config version. - -
-
-
-**event_messages:** `typing.Optional[PostedEventMessageSpecs]` +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -3181,7 +3030,13 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**timeouts:** `typing.Optional[PostedTimeoutSpecs]` +**version:** `int` + +Version number for a Config. + +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -3201,7 +3056,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-
client.empathic_voice.configs.delete_config(...) +
client.empathic_voice.configs.update_config_description(...)
@@ -3213,7 +3068,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-Deletes a **Config** and its versions. +Updates the description of a **Config**. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3235,8 +3090,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.delete_config( +client.empathic_voice.configs.update_config_description( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, + version_description="This is an updated version_description.", ) ``` @@ -3261,6 +3118,28 @@ client.empathic_voice.configs.delete_config(
+**version:** `int` + +Version number for a Config. + +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Config version. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3273,7 +3152,8 @@ client.empathic_voice.configs.delete_config(
-
client.empathic_voice.configs.update_config_name(...) +## EmpathicVoice Chats +
client.empathic_voice.chats.list_chats(...)
@@ -3285,9 +3165,7 @@ client.empathic_voice.configs.delete_config(
-Updates the name of a **Config**. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Fetches a paginated list of **Chats**.
@@ -3307,10 +3185,16 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.update_config_name( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - name="Updated Weather Assistant Config Name", +response = client.empathic_voice.chats.list_chats( + page_number=0, + page_size=1, + ascending_order=True, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ``` @@ -3326,7 +3210,11 @@ client.empathic_voice.configs.update_config_name(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3334,7 +3222,19 @@ client.empathic_voice.configs.update_config_name(
-**name:** `str` — Name applied to all versions of a particular Config. +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3354,7 +3254,7 @@ client.empathic_voice.configs.update_config_name(
-
client.empathic_voice.configs.get_config_version(...) +
client.empathic_voice.chats.list_chat_events(...)
@@ -3366,9 +3266,7 @@ client.empathic_voice.configs.update_config_name(
-Fetches a specified version of a **Config**. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Fetches a paginated list of **Chat** events.
@@ -3388,10 +3286,17 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.get_config_version( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, +response = client.empathic_voice.chats.list_chat_events( + id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", + page_number=0, + page_size=3, + ascending_order=True, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ``` @@ -3407,7 +3312,7 @@ client.empathic_voice.configs.get_config_version(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**id:** `str` — Identifier for a Chat. Formatted as a UUID.
@@ -3415,13 +3320,31 @@ client.empathic_voice.configs.get_config_version(
-**version:** `int` +**page_size:** `typing.Optional[int]` -Version number for a Config. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +
+
+ +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3441,7 +3364,8 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathic_voice.configs.delete_config_version(...) +## EmpathicVoice ChatGroups +
client.empathic_voice.chat_groups.list_chat_groups(...)
@@ -3453,9 +3377,7 @@ Version numbers are integer values representing different iterations of the Conf
-Deletes a specified version of a **Config**. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Fetches a paginated list of **Chat Groups**.
@@ -3475,9 +3397,11 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.delete_config_version( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, +client.empathic_voice.chat_groups.list_chat_groups( + page_number=0, + page_size=1, + ascending_order=True, + config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) ``` @@ -3494,7 +3418,11 @@ client.empathic_voice.configs.delete_config_version(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3502,13 +3430,31 @@ client.empathic_voice.configs.delete_config_version(
-**version:** `int` +**page_size:** `typing.Optional[int]` -Version number for a Config. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. + +
+
+ +
+
+ +**config_id:** `typing.Optional[str]` + +The unique identifier for an EVI configuration. + +Filter Chat Groups to only include Chats that used this `config_id` in their most recent Chat.
@@ -3528,7 +3474,7 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathic_voice.configs.update_config_description(...) +
client.empathic_voice.chat_groups.get_chat_group(...)
@@ -3540,9 +3486,7 @@ Version numbers are integer values representing different iterations of the Conf
-Updates the description of a **Config**. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Fetches a **ChatGroup** by ID, including a paginated list of **Chats** associated with the **ChatGroup**.
@@ -3562,10 +3506,11 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.update_config_description( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, - version_description="This is an updated version_description.", +client.empathic_voice.chat_groups.get_chat_group( + id="697056f0-6c7e-487d-9bd8-9c19df79f05f", + page_number=0, + page_size=1, + ascending_order=True, ) ``` @@ -3582,7 +3527,7 @@ client.empathic_voice.configs.update_config_description(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**id:** `str` — Identifier for a Chat Group. Formatted as a UUID.
@@ -3590,13 +3535,23 @@ client.empathic_voice.configs.update_config_description(
-**version:** `int` +**page_size:** `typing.Optional[int]` -Version number for a Config. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +
+
+ +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3604,7 +3559,7 @@ Version numbers are integer values representing different iterations of the Conf
-**version_description:** `typing.Optional[str]` — An optional description of the Config version. +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3624,8 +3579,7 @@ Version numbers are integer values representing different iterations of the Conf
-## EmpathicVoice Chats -
client.empathic_voice.chats.list_chats(...) +
client.empathic_voice.chat_groups.list_chat_group_events(...)
@@ -3637,7 +3591,7 @@ Version numbers are integer values representing different iterations of the Conf
-Fetches a paginated list of **Chats**. +Fetches a paginated list of **Chat** events associated with a **Chat Group**.
@@ -3657,16 +3611,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.chats.list_chats( +client.empathic_voice.chat_groups.list_chat_group_events( + id="697056f0-6c7e-487d-9bd8-9c19df79f05f", page_number=0, - page_size=1, + page_size=3, ascending_order=True, ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ``` @@ -3682,11 +3632,19 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` +**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. + +
+
-Specifies the page number to retrieve, enabling pagination. +
+
-This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -3694,11 +3652,11 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` +**page_number:** `typing.Optional[int]` -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +Specifies the page number to retrieve, enabling pagination. -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3726,7 +3684,8 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chats.list_chat_events(...) +## ExpressionMeasurement Batch +
client.expression_measurement.batch.list_jobs(...)
@@ -3738,7 +3697,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Fetches a paginated list of **Chat** events. +Sort and filter jobs.
@@ -3758,17 +3717,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.chats.list_chat_events( - id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", - page_number=0, - page_size=3, - ascending_order=True, -) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page +client.expression_measurement.batch.list_jobs() ``` @@ -3784,7 +3733,7 @@ for page in response.iter_pages():
-**id:** `str` — Identifier for a Chat. Formatted as a UUID. +**limit:** `typing.Optional[int]` — The maximum number of jobs to include in the response.
@@ -3792,23 +3741,17 @@ for page in response.iter_pages():
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +**status:** `typing.Optional[typing.Union[Status, typing.Sequence[Status]]]` -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
+Include only jobs of this status in the response. There are four possible statuses: -
-
+- `QUEUED`: The job has been received and is waiting to be processed. -**page_number:** `typing.Optional[int]` +- `IN_PROGRESS`: The job is currently being processed. -Specifies the page number to retrieve, enabling pagination. +- `COMPLETED`: The job has finished processing. -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +- `FAILED`: The job encountered an error and could not be completed successfully.
@@ -3816,7 +3759,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**when:** `typing.Optional[When]` — Specify whether to include jobs created before or after a given `timestamp_ms`.
@@ -3824,55 +3767,41 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
- -
+**timestamp_ms:** `typing.Optional[int]` +Provide a timestamp in milliseconds to filter jobs. +When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. + -
-
client.empathic_voice.chats.get_audio(...)
-#### 🔌 Usage - -
-
+**sort_by:** `typing.Optional[SortBy]` -
-
+Specify which timestamp to sort the jobs by. -```python -from hume import HumeClient +- `created`: Sort jobs by the time of creation, indicated by `created_timestamp_ms`. -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.chats.get_audio( - id="id", -) +- `started`: Sort jobs by the time processing started, indicated by `started_timestamp_ms`. -``` -
-
+- `ended`: Sort jobs by the time processing ended, indicated by `ended_timestamp_ms`. +
-#### ⚙️ Parameters -
-
-
+**direction:** `typing.Optional[Direction]` + +Specify the order in which to sort the jobs. Defaults to descending order. -**id:** `str` — Identifier for a chat. Formatted as a UUID. +- `asc`: Sort in ascending order (chronological, with the oldest records first). + +- `desc`: Sort in descending order (reverse-chronological, with the newest records first).
@@ -3892,8 +3821,7 @@ client.empathic_voice.chats.get_audio(
-## EmpathicVoice ChatGroups -
client.empathic_voice.chat_groups.list_chat_groups(...) +
client.expression_measurement.batch.start_inference_job(...)
@@ -3905,7 +3833,7 @@ client.empathic_voice.chats.get_audio(
-Fetches a paginated list of **Chat Groups**. +Start a new measurement inference job.
@@ -3925,11 +3853,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.list_chat_groups( - page_number=0, - page_size=1, - ascending_order=True, - config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", +client.expression_measurement.batch.start_inference_job( + urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"], + notify=True, ) ``` @@ -3946,11 +3872,11 @@ client.empathic_voice.chat_groups.list_chat_groups(
-**page_number:** `typing.Optional[int]` +**models:** `typing.Optional[Models]` -Specifies the page number to retrieve, enabling pagination. +Specify the models to use for inference. -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +If this field is not explicitly set, then all models will run by default.
@@ -3958,11 +3884,19 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` +**transcription:** `typing.Optional[Transcription]` + +
+
-Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +
+
-For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +**urls:** `typing.Optional[typing.Sequence[str]]` + +URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. + +If you wish to supply more than 100 URLs, consider providing them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`).
@@ -3970,7 +3904,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**text:** `typing.Optional[typing.Sequence[str]]` — Text supplied directly to our Emotional Language and NER models for analysis.
@@ -3978,11 +3912,15 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**config_id:** `typing.Optional[str]` +**callback_url:** `typing.Optional[str]` — If provided, a `POST` request will be made to the URL with the generated predictions on completion or the error message on failure. + +
+
-The unique identifier for an EVI configuration. +
+
-Filter Chat Groups to only include Chats that used this `config_id` in their most recent Chat. +**notify:** `typing.Optional[bool]` — Whether to send an email notification to the user upon job completion/failure.
@@ -4002,7 +3940,7 @@ Filter Chat Groups to only include Chats that used this `config_id` in their mos
-
client.empathic_voice.chat_groups.get_chat_group(...) +
client.expression_measurement.batch.get_job_details(...)
@@ -4014,7 +3952,7 @@ Filter Chat Groups to only include Chats that used this `config_id` in their mos
-Fetches a **ChatGroup** by ID, including a paginated list of **Chats** associated with the **ChatGroup**. +Get the request details and state of a given job.
@@ -4034,11 +3972,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.get_chat_group( - id="697056f0-6c7e-487d-9bd8-9c19df79f05f", - page_number=0, - page_size=1, - ascending_order=True, +client.expression_measurement.batch.get_job_details( + id="job_id", ) ``` @@ -4055,39 +3990,7 @@ client.empathic_voice.chat_groups.get_chat_group(
-**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**id:** `str` — The unique identifier for the job.
@@ -4107,7 +4010,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-
client.empathic_voice.chat_groups.list_chat_group_events(...) +
client.expression_measurement.batch.get_job_predictions(...)
@@ -4119,7 +4022,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-Fetches a paginated list of **Chat** events associated with a **Chat Group**. +Get the JSON predictions of a completed inference job.
@@ -4139,11 +4042,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.list_chat_group_events( - id="697056f0-6c7e-487d-9bd8-9c19df79f05f", - page_number=0, - page_size=3, - ascending_order=True, +client.expression_measurement.batch.get_job_predictions( + id="job_id", ) ``` @@ -4160,7 +4060,7 @@ client.empathic_voice.chat_groups.list_chat_group_events(
-**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. +**id:** `str` — The unique identifier for the job.
@@ -4168,53 +4068,35 @@ client.empathic_voice.chat_groups.list_chat_group_events(
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. -
+
+
client.expression_measurement.batch.start_inference_job_from_local_file(...)
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. - -
-
+#### 📝 Description
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
+
+
+ +Start a new batch inference job.
- - -
- -
client.empathic_voice.chat_groups.get_audio(...) -
-
#### 🔌 Usage @@ -4230,9 +4112,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.get_audio( - id="id", -) +client.expression_measurement.batch.start_inference_job_from_local_file() ```
@@ -4248,31 +4128,9 @@ client.empathic_voice.chat_groups.get_audio(
-**id:** `str` — Identifier for a chat. Formatted as a UUID. - -
-
- -
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +**file:** `from __future__ import annotations -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +typing.List[core.File]` — See core.File for more documentation
@@ -4280,7 +4138,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**ascending_order:** `typing.Optional[bool]` — Boolean to indicate if the results should be paginated in chronological order or reverse-chronological order. Defaults to true. +**json:** `typing.Optional[InferenceBaseRequest]` — Stringified JSON object containing the inference job configuration.
diff --git a/src/hume/base_client.py b/src/hume/base_client.py index d293f18b..889ec357 100644 --- a/src/hume/base_client.py +++ b/src/hume/base_client.py @@ -4,11 +4,11 @@ from .environment import HumeClientEnvironment import httpx from .core.client_wrapper import SyncClientWrapper -from .expression_measurement.client import ExpressionMeasurementClient from .empathic_voice.client import EmpathicVoiceClient +from .expression_measurement.client import ExpressionMeasurementClient from .core.client_wrapper import AsyncClientWrapper -from .expression_measurement.client import AsyncExpressionMeasurementClient from .empathic_voice.client import AsyncEmpathicVoiceClient +from .expression_measurement.client import AsyncExpressionMeasurementClient class BaseHumeClient: @@ -69,8 +69,8 @@ def __init__( else httpx.Client(timeout=_defaulted_timeout), timeout=_defaulted_timeout, ) - self.expression_measurement = ExpressionMeasurementClient(client_wrapper=self._client_wrapper) self.empathic_voice = EmpathicVoiceClient(client_wrapper=self._client_wrapper) + self.expression_measurement = ExpressionMeasurementClient(client_wrapper=self._client_wrapper) class AsyncBaseHumeClient: @@ -131,8 +131,8 @@ def __init__( else httpx.AsyncClient(timeout=_defaulted_timeout), timeout=_defaulted_timeout, ) - self.expression_measurement = AsyncExpressionMeasurementClient(client_wrapper=self._client_wrapper) self.empathic_voice = AsyncEmpathicVoiceClient(client_wrapper=self._client_wrapper) + self.expression_measurement = AsyncExpressionMeasurementClient(client_wrapper=self._client_wrapper) def _get_base_url(*, base_url: typing.Optional[str] = None, environment: HumeClientEnvironment) -> str: diff --git a/src/hume/empathic_voice/__init__.py b/src/hume/empathic_voice/__init__.py index 14992525..87f308f4 100644 --- a/src/hume/empathic_voice/__init__.py +++ b/src/hume/empathic_voice/__init__.py @@ -35,6 +35,7 @@ PostedEventMessageSpecs, PostedLanguageModel, PostedLanguageModelModelProvider, + 
PostedLanguageModelModelResource, PostedPromptSpec, PostedTimeoutSpec, PostedTimeoutSpecs, @@ -42,6 +43,7 @@ PostedTimeoutSpecsMaxDuration, PostedUserDefinedToolSpec, PostedVoice, + PostedVoiceProvider, ProsodyInference, ResumeAssistantMessage, ReturnActiveChatCount, @@ -49,10 +51,14 @@ ReturnBuiltinTool, ReturnBuiltinToolToolType, ReturnChat, + ReturnChatAudioReconstruction, + ReturnChatAudioReconstructionStatus, ReturnChatEvent, ReturnChatEventRole, ReturnChatEventType, ReturnChatGroup, + ReturnChatGroupPagedAudioReconstructions, + ReturnChatGroupPagedAudioReconstructionsPaginationDirection, ReturnChatGroupPagedChats, ReturnChatGroupPagedChatsPaginationDirection, ReturnChatGroupPagedEvents, @@ -71,6 +77,7 @@ ReturnEventMessageSpecs, ReturnLanguageModel, ReturnLanguageModelModelProvider, + ReturnLanguageModelModelResource, ReturnPagedChatGroups, ReturnPagedChatGroupsPaginationDirection, ReturnPagedChats, @@ -87,6 +94,7 @@ ReturnUserDefinedToolToolType, ReturnUserDefinedToolVersionType, ReturnVoice, + ReturnVoiceProvider, Role, SessionSettings, TextInput, @@ -144,6 +152,7 @@ "PostedEventMessageSpecs", "PostedLanguageModel", "PostedLanguageModelModelProvider", + "PostedLanguageModelModelResource", "PostedPromptSpec", "PostedTimeoutSpec", "PostedTimeoutSpecs", @@ -151,6 +160,7 @@ "PostedTimeoutSpecsMaxDuration", "PostedUserDefinedToolSpec", "PostedVoice", + "PostedVoiceProvider", "ProsodyInference", "PublishEvent", "ResumeAssistantMessage", @@ -159,10 +169,14 @@ "ReturnBuiltinTool", "ReturnBuiltinToolToolType", "ReturnChat", + "ReturnChatAudioReconstruction", + "ReturnChatAudioReconstructionStatus", "ReturnChatEvent", "ReturnChatEventRole", "ReturnChatEventType", "ReturnChatGroup", + "ReturnChatGroupPagedAudioReconstructions", + "ReturnChatGroupPagedAudioReconstructionsPaginationDirection", "ReturnChatGroupPagedChats", "ReturnChatGroupPagedChatsPaginationDirection", "ReturnChatGroupPagedEvents", @@ -181,6 +195,7 @@ "ReturnEventMessageSpecs", "ReturnLanguageModel", 
"ReturnLanguageModelModelProvider", + "ReturnLanguageModelModelResource", "ReturnPagedChatGroups", "ReturnPagedChatGroupsPaginationDirection", "ReturnPagedChats", @@ -197,6 +212,7 @@ "ReturnUserDefinedToolToolType", "ReturnUserDefinedToolVersionType", "ReturnVoice", + "ReturnVoiceProvider", "Role", "SessionSettings", "SubscribeEvent", diff --git a/src/hume/empathic_voice/chat_groups/client.py b/src/hume/empathic_voice/chat_groups/client.py index 3cf2e4d5..5e440db3 100644 --- a/src/hume/empathic_voice/chat_groups/client.py +++ b/src/hume/empathic_voice/chat_groups/client.py @@ -12,7 +12,6 @@ from ..types.return_chat_group_paged_chats import ReturnChatGroupPagedChats from ...core.jsonable_encoder import jsonable_encoder from ..types.return_chat_group_paged_events import ReturnChatGroupPagedEvents -from ..types.return_chat_paged_events import ReturnChatPagedEvents from ...core.client_wrapper import AsyncClientWrapper @@ -281,87 +280,6 @@ def list_chat_group_events( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_audio( - self, - id: str, - *, - page_number: typing.Optional[int] = None, - page_size: typing.Optional[int] = None, - ascending_order: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> ReturnChatPagedEvents: - """ - Parameters - ---------- - id : str - Identifier for a chat. Formatted as a UUID. - - page_number : typing.Optional[int] - Specifies the page number to retrieve, enabling pagination. - - This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - - page_size : typing.Optional[int] - Specifies the maximum number of results to include per page, enabling pagination. 
The value must be between 1 and 100, inclusive. - - For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - - ascending_order : typing.Optional[bool] - Boolean to indicate if the results should be paginated in chronological order or reverse-chronological order. Defaults to true. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - ReturnChatPagedEvents - Success - - Examples - -------- - from hume import HumeClient - - client = HumeClient( - api_key="YOUR_API_KEY", - ) - client.empathic_voice.chat_groups.get_audio( - id="id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"v0/evi/chat_groups/{jsonable_encoder(id)}/audio", - method="GET", - params={ - "page_number": page_number, - "page_size": page_size, - "ascending_order": ascending_order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ReturnChatPagedEvents, - parse_obj_as( - type_=ReturnChatPagedEvents, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 400: - raise BadRequestError( - typing.cast( - ErrorResponse, - parse_obj_as( - type_=ErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - class AsyncChatGroupsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): @@ -651,92 +569,3 @@ async def main() -> None: except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - - async def get_audio( - self, - id: str, - *, - page_number: typing.Optional[int] = None, - page_size: typing.Optional[int] = None, - ascending_order: typing.Optional[bool] = None, - 
request_options: typing.Optional[RequestOptions] = None, - ) -> ReturnChatPagedEvents: - """ - Parameters - ---------- - id : str - Identifier for a chat. Formatted as a UUID. - - page_number : typing.Optional[int] - Specifies the page number to retrieve, enabling pagination. - - This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - - page_size : typing.Optional[int] - Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - - For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - - ascending_order : typing.Optional[bool] - Boolean to indicate if the results should be paginated in chronological order or reverse-chronological order. Defaults to true. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ReturnChatPagedEvents - Success - - Examples - -------- - import asyncio - - from hume import AsyncHumeClient - - client = AsyncHumeClient( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.empathic_voice.chat_groups.get_audio( - id="id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v0/evi/chat_groups/{jsonable_encoder(id)}/audio", - method="GET", - params={ - "page_number": page_number, - "page_size": page_size, - "ascending_order": ascending_order, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ReturnChatPagedEvents, - parse_obj_as( - type_=ReturnChatPagedEvents, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 400: - raise BadRequestError( - typing.cast( - ErrorResponse, - parse_obj_as( - type_=ErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/hume/empathic_voice/chats/client.py b/src/hume/empathic_voice/chats/client.py index a51ef390..579ec830 100644 --- a/src/hume/empathic_voice/chats/client.py +++ b/src/hume/empathic_voice/chats/client.py @@ -220,61 +220,6 @@ def list_chat_events( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_audio(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> ReturnChatPagedEvents: - """ - Parameters - ---------- - id : str - Identifier for a chat. Formatted as a UUID. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ReturnChatPagedEvents - Success - - Examples - -------- - from hume import HumeClient - - client = HumeClient( - api_key="YOUR_API_KEY", - ) - client.empathic_voice.chats.get_audio( - id="id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"v0/evi/chats/{jsonable_encoder(id)}/audio", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ReturnChatPagedEvents, - parse_obj_as( - type_=ReturnChatPagedEvents, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 400: - raise BadRequestError( - typing.cast( - ErrorResponse, - parse_obj_as( - type_=ErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - class AsyncChatsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): @@ -493,68 +438,3 @@ async def main() -> None: except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - - async def get_audio( - self, id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> ReturnChatPagedEvents: - """ - Parameters - ---------- - id : str - Identifier for a chat. Formatted as a UUID. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - ReturnChatPagedEvents - Success - - Examples - -------- - import asyncio - - from hume import AsyncHumeClient - - client = AsyncHumeClient( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.empathic_voice.chats.get_audio( - id="id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v0/evi/chats/{jsonable_encoder(id)}/audio", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ReturnChatPagedEvents, - parse_obj_as( - type_=ReturnChatPagedEvents, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 400: - raise BadRequestError( - typing.cast( - ErrorResponse, - parse_obj_as( - type_=ErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/hume/empathic_voice/configs/client.py b/src/hume/empathic_voice/configs/client.py index 04b29052..b09577ec 100644 --- a/src/hume/empathic_voice/configs/client.py +++ b/src/hume/empathic_voice/configs/client.py @@ -204,6 +204,7 @@ def create_config( ), evi_version="2", voice=PostedVoice( + provider="HUME_AI", name="SAMPLE VOICE", ), language_model=PostedLanguageModel( @@ -464,6 +465,7 @@ def create_config_version( version=0, ), voice=PostedVoice( + provider="HUME_AI", name="ITO", ), language_model=PostedLanguageModel( @@ -1070,6 +1072,7 @@ async def main() -> None: ), evi_version="2", voice=PostedVoice( + provider="HUME_AI", name="SAMPLE VOICE", ), language_model=PostedLanguageModel( @@ -1346,6 +1349,7 @@ async def main() -> None: version=0, ), voice=PostedVoice( + provider="HUME_AI", name="ITO", ), language_model=PostedLanguageModel( diff --git a/src/hume/empathic_voice/types/__init__.py 
b/src/hume/empathic_voice/types/__init__.py index 94bf2b47..f88e8212 100644 --- a/src/hume/empathic_voice/types/__init__.py +++ b/src/hume/empathic_voice/types/__init__.py @@ -34,6 +34,7 @@ from .posted_event_message_specs import PostedEventMessageSpecs from .posted_language_model import PostedLanguageModel from .posted_language_model_model_provider import PostedLanguageModelModelProvider +from .posted_language_model_model_resource import PostedLanguageModelModelResource from .posted_prompt_spec import PostedPromptSpec from .posted_timeout_spec import PostedTimeoutSpec from .posted_timeout_specs import PostedTimeoutSpecs @@ -41,6 +42,7 @@ from .posted_timeout_specs_max_duration import PostedTimeoutSpecsMaxDuration from .posted_user_defined_tool_spec import PostedUserDefinedToolSpec from .posted_voice import PostedVoice +from .posted_voice_provider import PostedVoiceProvider from .prosody_inference import ProsodyInference from .resume_assistant_message import ResumeAssistantMessage from .return_active_chat_count import ReturnActiveChatCount @@ -48,10 +50,16 @@ from .return_builtin_tool import ReturnBuiltinTool from .return_builtin_tool_tool_type import ReturnBuiltinToolToolType from .return_chat import ReturnChat +from .return_chat_audio_reconstruction import ReturnChatAudioReconstruction +from .return_chat_audio_reconstruction_status import ReturnChatAudioReconstructionStatus from .return_chat_event import ReturnChatEvent from .return_chat_event_role import ReturnChatEventRole from .return_chat_event_type import ReturnChatEventType from .return_chat_group import ReturnChatGroup +from .return_chat_group_paged_audio_reconstructions import ReturnChatGroupPagedAudioReconstructions +from .return_chat_group_paged_audio_reconstructions_pagination_direction import ( + ReturnChatGroupPagedAudioReconstructionsPaginationDirection, +) from .return_chat_group_paged_chats import ReturnChatGroupPagedChats from .return_chat_group_paged_chats_pagination_direction import 
ReturnChatGroupPagedChatsPaginationDirection from .return_chat_group_paged_events import ReturnChatGroupPagedEvents @@ -70,6 +78,7 @@ from .return_event_message_specs import ReturnEventMessageSpecs from .return_language_model import ReturnLanguageModel from .return_language_model_model_provider import ReturnLanguageModelModelProvider +from .return_language_model_model_resource import ReturnLanguageModelModelResource from .return_paged_chat_groups import ReturnPagedChatGroups from .return_paged_chat_groups_pagination_direction import ReturnPagedChatGroupsPaginationDirection from .return_paged_chats import ReturnPagedChats @@ -86,6 +95,7 @@ from .return_user_defined_tool_tool_type import ReturnUserDefinedToolToolType from .return_user_defined_tool_version_type import ReturnUserDefinedToolVersionType from .return_voice import ReturnVoice +from .return_voice_provider import ReturnVoiceProvider from .role import Role from .session_settings import SessionSettings from .text_input import TextInput @@ -138,6 +148,7 @@ "PostedEventMessageSpecs", "PostedLanguageModel", "PostedLanguageModelModelProvider", + "PostedLanguageModelModelResource", "PostedPromptSpec", "PostedTimeoutSpec", "PostedTimeoutSpecs", @@ -145,6 +156,7 @@ "PostedTimeoutSpecsMaxDuration", "PostedUserDefinedToolSpec", "PostedVoice", + "PostedVoiceProvider", "ProsodyInference", "ResumeAssistantMessage", "ReturnActiveChatCount", @@ -152,10 +164,14 @@ "ReturnBuiltinTool", "ReturnBuiltinToolToolType", "ReturnChat", + "ReturnChatAudioReconstruction", + "ReturnChatAudioReconstructionStatus", "ReturnChatEvent", "ReturnChatEventRole", "ReturnChatEventType", "ReturnChatGroup", + "ReturnChatGroupPagedAudioReconstructions", + "ReturnChatGroupPagedAudioReconstructionsPaginationDirection", "ReturnChatGroupPagedChats", "ReturnChatGroupPagedChatsPaginationDirection", "ReturnChatGroupPagedEvents", @@ -174,6 +190,7 @@ "ReturnEventMessageSpecs", "ReturnLanguageModel", "ReturnLanguageModelModelProvider", + 
"ReturnLanguageModelModelResource", "ReturnPagedChatGroups", "ReturnPagedChatGroupsPaginationDirection", "ReturnPagedChats", @@ -190,6 +207,7 @@ "ReturnUserDefinedToolToolType", "ReturnUserDefinedToolVersionType", "ReturnVoice", + "ReturnVoiceProvider", "Role", "SessionSettings", "TextInput", diff --git a/src/hume/empathic_voice/types/posted_language_model.py b/src/hume/empathic_voice/types/posted_language_model.py index b9d61c46..ecc3a7a0 100644 --- a/src/hume/empathic_voice/types/posted_language_model.py +++ b/src/hume/empathic_voice/types/posted_language_model.py @@ -4,6 +4,7 @@ import typing from .posted_language_model_model_provider import PostedLanguageModelModelProvider import pydantic +from .posted_language_model_model_resource import PostedLanguageModelModelResource from ...core.pydantic_utilities import IS_PYDANTIC_V2 @@ -17,7 +18,7 @@ class PostedLanguageModel(UniversalBaseModel): The provider of the supplemental language model. """ - model_resource: typing.Optional[str] = pydantic.Field(default=None) + model_resource: typing.Optional[PostedLanguageModelModelResource] = pydantic.Field(default=None) """ String that specifies the language model to use with `model_provider`. """ diff --git a/src/hume/empathic_voice/types/posted_language_model_model_resource.py b/src/hume/empathic_voice/types/posted_language_model_model_resource.py new file mode 100644 index 00000000..f917685d --- /dev/null +++ b/src/hume/empathic_voice/types/posted_language_model_model_resource.py @@ -0,0 +1,35 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +PostedLanguageModelModelResource = typing.Union[ + typing.Literal[ + "claude-3-5-sonnet-latest", + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-2.1", + "claude-instant-1.2", + "gemini-1.5-pro", + "gemini-1.5-flash", + "gemini-1.5-pro-002", + "gemini-1.5-flash-002", + "gpt-4-turbo-preview", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo", + "gpt-4o", + "gpt-4o-mini", + "gemma-7b-it", + "llama3-8b-8192", + "llama3-70b-8192", + "llama-3.1-70b-versatile", + "llama-3.1-8b-instant", + "accounts/fireworks/models/mixtral-8x7b-instruct", + "accounts/fireworks/models/llama-v3p1-405b-instruct", + "accounts/fireworks/models/llama-v3p1-70b-instruct", + "accounts/fireworks/models/llama-v3p1-8b-instruct", + "ellm", + ], + typing.Any, +] diff --git a/src/hume/empathic_voice/types/posted_voice.py b/src/hume/empathic_voice/types/posted_voice.py index 9d48eebf..603047bc 100644 --- a/src/hume/empathic_voice/types/posted_voice.py +++ b/src/hume/empathic_voice/types/posted_voice.py @@ -1,8 +1,9 @@ # This file was auto-generated by Fern from our API Definition. from ...core.pydantic_utilities import UniversalBaseModel -import typing +from .posted_voice_provider import PostedVoiceProvider import pydantic +import typing from .posted_custom_voice import PostedCustomVoice from ...core.pydantic_utilities import IS_PYDANTIC_V2 @@ -12,9 +13,9 @@ class PostedVoice(UniversalBaseModel): A Voice specification posted to the server """ - provider: typing.Literal["HUME_AI"] = pydantic.Field(default="HUME_AI") + provider: PostedVoiceProvider = pydantic.Field() """ - The provider of the voice to use. Currently, only `HUME_AI` is supported as the voice provider. + The provider of the voice to use. Supported values are `HUME_AI` and `CUSTOM_VOICE`. 
""" name: typing.Optional[str] = pydantic.Field(default=None) diff --git a/src/hume/empathic_voice/types/posted_voice_provider.py b/src/hume/empathic_voice/types/posted_voice_provider.py new file mode 100644 index 00000000..e3895de1 --- /dev/null +++ b/src/hume/empathic_voice/types/posted_voice_provider.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PostedVoiceProvider = typing.Union[typing.Literal["HUME_AI", "CUSTOM_VOICE"], typing.Any] diff --git a/src/hume/empathic_voice/types/return_chat_audio_reconstruction.py b/src/hume/empathic_voice/types/return_chat_audio_reconstruction.py new file mode 100644 index 00000000..b12a2ba0 --- /dev/null +++ b/src/hume/empathic_voice/types/return_chat_audio_reconstruction.py @@ -0,0 +1,67 @@ +# This file was auto-generated by Fern from our API Definition. + +from ...core.pydantic_utilities import UniversalBaseModel +import pydantic +from .return_chat_audio_reconstruction_status import ReturnChatAudioReconstructionStatus +import typing +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ReturnChatAudioReconstruction(UniversalBaseModel): + """ + List of chat audio reconstructions returned for the specified page number and page size. + """ + + id: str = pydantic.Field() + """ + Identifier for the chat. Formatted as a UUID. + """ + + user_id: str = pydantic.Field() + """ + Identifier for the user that owns this chat. Formatted as a UUID. + """ + + status: ReturnChatAudioReconstructionStatus = pydantic.Field() + """ + Indicates the current state of the audio reconstruction job. There are five possible statuses: + + - `QUEUED`: The reconstruction job is waiting to be processed. + + - `IN_PROGRESS`: The reconstruction is currently being processed. + + - `COMPLETE`: The audio reconstruction is finished and ready for download. + + - `ERROR`: An error occurred during the reconstruction process. + + - `CANCELED`: The reconstruction job has been canceled. 
+ """ + + filename: typing.Optional[str] = pydantic.Field(default=None) + """ + Name of the chat audio reconstruction file. + """ + + modified_at: typing.Optional[int] = pydantic.Field(default=None) + """ + The timestamp of the most recent status change for this audio reconstruction, formatted milliseconds since the Unix epoch. + """ + + signed_audio_url: typing.Optional[str] = pydantic.Field(default=None) + """ + Signed URL used to download the chat audio reconstruction file. + """ + + signed_url_expiration_timestamp_millis: typing.Optional[int] = pydantic.Field(default=None) + """ + The timestamp when the signed URL will expire, formatted as a Unix epoch milliseconds. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/hume/empathic_voice/types/return_chat_audio_reconstruction_status.py b/src/hume/empathic_voice/types/return_chat_audio_reconstruction_status.py new file mode 100644 index 00000000..7609906c --- /dev/null +++ b/src/hume/empathic_voice/types/return_chat_audio_reconstruction_status.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ReturnChatAudioReconstructionStatus = typing.Union[ + typing.Literal["QUEUED", "IN_PROGRESS", "COMPLETE", "ERROR", "CANCELLED"], typing.Any +] diff --git a/src/hume/empathic_voice/types/return_chat_group_paged_audio_reconstructions.py b/src/hume/empathic_voice/types/return_chat_group_paged_audio_reconstructions.py new file mode 100644 index 00000000..fb746d79 --- /dev/null +++ b/src/hume/empathic_voice/types/return_chat_group_paged_audio_reconstructions.py @@ -0,0 +1,71 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ...core.pydantic_utilities import UniversalBaseModel +import pydantic +from .return_chat_group_paged_audio_reconstructions_pagination_direction import ( + ReturnChatGroupPagedAudioReconstructionsPaginationDirection, +) +import typing +from .return_chat_audio_reconstruction import ReturnChatAudioReconstruction +from ...core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ReturnChatGroupPagedAudioReconstructions(UniversalBaseModel): + """ + A paginated list of chat reconstructions for a particular chatgroup + """ + + id: str = pydantic.Field() + """ + Identifier for the chat group. Formatted as a UUID. + """ + + user_id: str = pydantic.Field() + """ + Identifier for the user that owns this chat. Formatted as a UUID. + """ + + num_chats: int = pydantic.Field() + """ + Total number of chats in this chatgroup + """ + + page_number: int = pydantic.Field() + """ + The page number of the returned list. + + This value corresponds to the `page_number` parameter specified in the request. Pagination uses zero-based indexing. + """ + + page_size: int = pydantic.Field() + """ + The maximum number of items returned per page. + + This value corresponds to the `page_size` parameter specified in the request. + """ + + total_pages: int = pydantic.Field() + """ + The total number of pages in the collection. + """ + + pagination_direction: ReturnChatGroupPagedAudioReconstructionsPaginationDirection = pydantic.Field() + """ + Indicates the order in which the paginated results are presented, based on their creation date. + + It shows `ASC` for ascending order (chronological, with the oldest records first) or `DESC` for descending order (reverse-chronological, with the newest records first). This value corresponds to the `ascending_order` query parameter used in the request. + """ + + audio_reconstructions_page: typing.List[ReturnChatAudioReconstruction] = pydantic.Field() + """ + List of chat audio reconstructions returned for the specified page number and page size. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/hume/empathic_voice/types/return_chat_group_paged_audio_reconstructions_pagination_direction.py b/src/hume/empathic_voice/types/return_chat_group_paged_audio_reconstructions_pagination_direction.py new file mode 100644 index 00000000..4fbd5e97 --- /dev/null +++ b/src/hume/empathic_voice/types/return_chat_group_paged_audio_reconstructions_pagination_direction.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ReturnChatGroupPagedAudioReconstructionsPaginationDirection = typing.Union[typing.Literal["ASC", "DESC"], typing.Any] diff --git a/src/hume/empathic_voice/types/return_language_model.py b/src/hume/empathic_voice/types/return_language_model.py index b5f337c6..f0a3cb66 100644 --- a/src/hume/empathic_voice/types/return_language_model.py +++ b/src/hume/empathic_voice/types/return_language_model.py @@ -4,6 +4,7 @@ import typing from .return_language_model_model_provider import ReturnLanguageModelModelProvider import pydantic +from .return_language_model_model_resource import ReturnLanguageModelModelResource from ...core.pydantic_utilities import IS_PYDANTIC_V2 @@ -17,7 +18,7 @@ class ReturnLanguageModel(UniversalBaseModel): The provider of the supplemental language model. """ - model_resource: typing.Optional[str] = pydantic.Field(default=None) + model_resource: typing.Optional[ReturnLanguageModelModelResource] = pydantic.Field(default=None) """ String that specifies the language model to use with `model_provider`. 
""" diff --git a/src/hume/empathic_voice/types/return_language_model_model_resource.py b/src/hume/empathic_voice/types/return_language_model_model_resource.py new file mode 100644 index 00000000..2d9bc0ed --- /dev/null +++ b/src/hume/empathic_voice/types/return_language_model_model_resource.py @@ -0,0 +1,35 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ReturnLanguageModelModelResource = typing.Union[ + typing.Literal[ + "claude-3-5-sonnet-latest", + "claude-3-5-sonnet-20240620", + "claude-3-opus-20240229", + "claude-3-sonnet-20240229", + "claude-3-haiku-20240307", + "claude-2.1", + "claude-instant-1.2", + "gemini-1.5-pro", + "gemini-1.5-flash", + "gemini-1.5-pro-002", + "gemini-1.5-flash-002", + "gpt-4-turbo-preview", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo", + "gpt-4o", + "gpt-4o-mini", + "gemma-7b-it", + "llama3-8b-8192", + "llama3-70b-8192", + "llama-3.1-70b-versatile", + "llama-3.1-8b-instant", + "accounts/fireworks/models/mixtral-8x7b-instruct", + "accounts/fireworks/models/llama-v3p1-405b-instruct", + "accounts/fireworks/models/llama-v3p1-70b-instruct", + "accounts/fireworks/models/llama-v3p1-8b-instruct", + "ellm", + ], + typing.Any, +] diff --git a/src/hume/empathic_voice/types/return_voice.py b/src/hume/empathic_voice/types/return_voice.py index aa1ef685..9079ec0c 100644 --- a/src/hume/empathic_voice/types/return_voice.py +++ b/src/hume/empathic_voice/types/return_voice.py @@ -1,8 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
from ...core.pydantic_utilities import UniversalBaseModel -import typing +from .return_voice_provider import ReturnVoiceProvider import pydantic +import typing from .return_custom_voice import ReturnCustomVoice from ...core.pydantic_utilities import IS_PYDANTIC_V2 @@ -12,9 +13,9 @@ class ReturnVoice(UniversalBaseModel): A specific voice specification """ - provider: typing.Literal["HUME_AI"] = pydantic.Field(default="HUME_AI") + provider: ReturnVoiceProvider = pydantic.Field() """ - The provider of the voice to use. Currently, only `HUME_AI` is supported as the voice provider. + The provider of the voice to use. Supported values are `HUME_AI` and `CUSTOM_VOICE`. """ name: typing.Optional[str] = pydantic.Field(default=None) diff --git a/src/hume/empathic_voice/types/return_voice_provider.py b/src/hume/empathic_voice/types/return_voice_provider.py new file mode 100644 index 00000000..e68946df --- /dev/null +++ b/src/hume/empathic_voice/types/return_voice_provider.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ReturnVoiceProvider = typing.Union[typing.Literal["HUME_AI", "CUSTOM_VOICE"], typing.Any] diff --git a/tests/empathic_voice/test_chat_groups.py b/tests/empathic_voice/test_chat_groups.py index 9b0d1e4c..5a89ce19 100644 --- a/tests/empathic_voice/test_chat_groups.py +++ b/tests/empathic_voice/test_chat_groups.py @@ -201,64 +201,3 @@ async def test_list_chat_group_events(client: HumeClient, async_client: AsyncHum id="697056f0-6c7e-487d-9bd8-9c19df79f05f", page_number=0, page_size=3, ascending_order=True ) validate_response(async_response, expected_response, expected_types) - - -async def test_get_audio(client: HumeClient, async_client: AsyncHumeClient) -> None: - expected_response: typing.Any = { - "id": "id", - "chat_group_id": "chat_group_id", - "status": "ACTIVE", - "start_timestamp": 1000000, - "end_timestamp": 1000000, - "pagination_direction": "ASC", - "events_page": [ - { - "id": "id", - "chat_id": "chat_id", - "timestamp": 1000000, - "role": "USER", - "type": "SYSTEM_PROMPT", - "message_text": "message_text", - "emotion_features": "emotion_features", - "metadata": "metadata", - } - ], - "metadata": "metadata", - "page_number": 1, - "page_size": 1, - "total_pages": 1, - "config": {"id": "id", "version": 1}, - } - expected_types: typing.Any = { - "id": None, - "chat_group_id": None, - "status": None, - "start_timestamp": None, - "end_timestamp": None, - "pagination_direction": None, - "events_page": ( - "list", - { - 0: { - "id": None, - "chat_id": None, - "timestamp": None, - "role": None, - "type": None, - "message_text": None, - "emotion_features": None, - "metadata": None, - } - }, - ), - "metadata": None, - "page_number": "integer", - "page_size": "integer", - "total_pages": "integer", - "config": {"id": None, "version": "integer"}, - } - response = client.empathic_voice.chat_groups.get_audio(id="id") - validate_response(response, expected_response, expected_types) - - async_response = await 
async_client.empathic_voice.chat_groups.get_audio(id="id") - validate_response(async_response, expected_response, expected_types) diff --git a/tests/empathic_voice/test_chats.py b/tests/empathic_voice/test_chats.py deleted file mode 100644 index 98f8d98d..00000000 --- a/tests/empathic_voice/test_chats.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from hume import HumeClient -from hume import AsyncHumeClient -import typing -from ..utilities import validate_response - - -async def test_get_audio(client: HumeClient, async_client: AsyncHumeClient) -> None: - expected_response: typing.Any = { - "id": "id", - "chat_group_id": "chat_group_id", - "status": "ACTIVE", - "start_timestamp": 1000000, - "end_timestamp": 1000000, - "pagination_direction": "ASC", - "events_page": [ - { - "id": "id", - "chat_id": "chat_id", - "timestamp": 1000000, - "role": "USER", - "type": "SYSTEM_PROMPT", - "message_text": "message_text", - "emotion_features": "emotion_features", - "metadata": "metadata", - } - ], - "metadata": "metadata", - "page_number": 1, - "page_size": 1, - "total_pages": 1, - "config": {"id": "id", "version": 1}, - } - expected_types: typing.Any = { - "id": None, - "chat_group_id": None, - "status": None, - "start_timestamp": None, - "end_timestamp": None, - "pagination_direction": None, - "events_page": ( - "list", - { - 0: { - "id": None, - "chat_id": None, - "timestamp": None, - "role": None, - "type": None, - "message_text": None, - "emotion_features": None, - "metadata": None, - } - }, - ), - "metadata": None, - "page_number": "integer", - "page_size": "integer", - "total_pages": "integer", - "config": {"id": None, "version": "integer"}, - } - response = client.empathic_voice.chats.get_audio(id="id") - validate_response(response, expected_response, expected_types) - - async_response = await async_client.empathic_voice.chats.get_audio(id="id") - validate_response(async_response, expected_response, expected_types) 
diff --git a/tests/empathic_voice/test_configs.py b/tests/empathic_voice/test_configs.py index ce2c9158..33e9e947 100644 --- a/tests/empathic_voice/test_configs.py +++ b/tests/empathic_voice/test_configs.py @@ -282,7 +282,7 @@ async def test_create_config(client: HumeClient, async_client: AsyncHumeClient) name="Weather Assistant Config", prompt=PostedConfigPromptSpec(id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0), evi_version="2", - voice=PostedVoice(name="SAMPLE VOICE"), + voice=PostedVoice(provider="HUME_AI", name="SAMPLE VOICE"), language_model=PostedLanguageModel( model_provider="ANTHROPIC", model_resource="claude-3-5-sonnet-20240620", temperature=1.0 ), @@ -298,7 +298,7 @@ async def test_create_config(client: HumeClient, async_client: AsyncHumeClient) name="Weather Assistant Config", prompt=PostedConfigPromptSpec(id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0), evi_version="2", - voice=PostedVoice(name="SAMPLE VOICE"), + voice=PostedVoice(provider="HUME_AI", name="SAMPLE VOICE"), language_model=PostedLanguageModel( model_provider="ANTHROPIC", model_resource="claude-3-5-sonnet-20240620", temperature=1.0 ), @@ -584,7 +584,7 @@ async def test_create_config_version(client: HumeClient, async_client: AsyncHume version_description="This is an updated version of the Weather Assistant Config.", evi_version="2", prompt=PostedConfigPromptSpec(id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0), - voice=PostedVoice(name="ITO"), + voice=PostedVoice(provider="HUME_AI", name="ITO"), language_model=PostedLanguageModel( model_provider="ANTHROPIC", model_resource="claude-3-5-sonnet-20240620", temperature=1.0 ), @@ -602,7 +602,7 @@ async def test_create_config_version(client: HumeClient, async_client: AsyncHume version_description="This is an updated version of the Weather Assistant Config.", evi_version="2", prompt=PostedConfigPromptSpec(id="af699d45-2985-42cc-91b9-af9e5da3bac5", version=0), - voice=PostedVoice(name="ITO"), + voice=PostedVoice(provider="HUME_AI", 
name="ITO"), language_model=PostedLanguageModel( model_provider="ANTHROPIC", model_resource="claude-3-5-sonnet-20240620", temperature=1.0 ),