diff --git a/.mock/definition/empathic-voice/__package__.yml b/.mock/definition/empathic-voice/__package__.yml index 2fc7119a..2af2f290 100644 --- a/.mock/definition/empathic-voice/__package__.yml +++ b/.mock/definition/empathic-voice/__package__.yml @@ -19,6 +19,7 @@ types: docs: >- Type of Tool. Either `BUILTIN` for natively implemented tools, like web search, or `FUNCTION` for user-defined tools. + inline: true source: openapi: stenographer-openapi.json ReturnUserDefinedToolVersionType: @@ -28,6 +29,7 @@ types: docs: >- Versioning method for a Tool. Either `FIXED` for using a fixed version number or `LATEST` for auto-updating to the latest version. + inline: true source: openapi: stenographer-openapi.json ReturnUserDefinedTool: @@ -106,6 +108,7 @@ types: docs: >- Versioning method for a Prompt. Either `FIXED` for using a fixed version number or `LATEST` for auto-updating to the latest version. + inline: true source: openapi: stenographer-openapi.json ReturnPrompt: @@ -178,6 +181,7 @@ types: - STELLA - SUNNY docs: Specifies the base voice used to create the Custom Voice. + inline: true source: openapi: stenographer-openapi.json PostedCustomVoiceParameters: @@ -337,6 +341,7 @@ types: - STELLA - SUNNY docs: The base voice used to create the Custom Voice. + inline: true source: openapi: stenographer-openapi.json ReturnCustomVoiceParameters: @@ -513,6 +518,7 @@ types: For more information, see our guide on [using built-in tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). + inline: true source: openapi: stenographer-openapi.json PostedBuiltinTool: @@ -642,6 +648,7 @@ types: - GROQ - GOOGLE docs: The provider of the supplemental language model. + inline: true source: openapi: stenographer-openapi.json PostedLanguageModelModelResource: @@ -698,6 +705,7 @@ types: name: AccountsFireworksModelsLlamaV3P18BInstruct - ellm docs: String that specifies the language model to use with `model_provider`. + inline: true source: openapi: stenographer-openapi.json PostedLanguageModel: @@ -847,6 +855,7 @@ types: docs: >- The provider of the voice to use. Supported values are `HUME_AI` and `CUSTOM_VOICE`. + inline: true source: openapi: stenographer-openapi.json PostedVoice: @@ -880,6 +889,7 @@ types: - chat_started - chat_ended docs: Events this URL is subscribed to + inline: true source: openapi: stenographer-openapi.json PostedWebhookSpec: @@ -887,9 +897,18 @@ types: properties: url: type: string - docs: URL to send the webhook to + docs: >- + The URL where event payloads will be sent. This must be a valid https + URL to ensure secure communication. The server at this URL must accept + POST requests with a JSON payload. events: - docs: Events this URL is subscribed to + docs: >- + The list of events the specified URL is subscribed to. + + + See our [webhooks + guide](/docs/empathic-voice-interface-evi/configuration#supported-events) + for more information on supported events. type: list source: openapi: stenographer-openapi.json @@ -900,6 +919,7 @@ types: docs: >- Type of Tool. Either `BUILTIN` for natively implemented tools, like web search, or `FUNCTION` for user-defined tools. + inline: true source: openapi: stenographer-openapi.json ReturnBuiltinTool: @@ -1083,6 +1103,7 @@ types: - GROQ - GOOGLE docs: The provider of the supplemental language model. 
+ inline: true source: openapi: stenographer-openapi.json ReturnLanguageModelModelResource: @@ -1143,6 +1164,7 @@ types: name: AccountsFireworksModelsLlamaV3P18BInstruct - ellm docs: String that specifies the language model to use with `model_provider`. + inline: true source: openapi: stenographer-openapi.json ReturnLanguageModel: @@ -1224,6 +1246,7 @@ types: docs: >- The provider of the voice to use. Supported values are `HUME_AI` and `CUSTOM_VOICE`. + inline: true source: openapi: stenographer-openapi.json ReturnVoice: @@ -1251,6 +1274,7 @@ types: - chat_started - chat_ended docs: Events this URL is subscribed to + inline: true source: openapi: stenographer-openapi.json ReturnWebhookSpec: @@ -1258,9 +1282,18 @@ types: properties: url: type: string - docs: Webhook URL to send the event updates to + docs: >- + The URL where event payloads will be sent. This must be a valid https + URL to ensure secure communication. The server at this URL must accept + POST requests with a JSON payload. events: - docs: Events this URL is subscribed to + docs: >- + The list of events the specified URL is subscribed to. + + + See our [webhooks + guide](/docs/empathic-voice-interface-evi/configuration#supported-events) + for more information on supported events. type: list source: openapi: stenographer-openapi.json @@ -1407,6 +1440,7 @@ types: - `ERROR`: The chat ended unexpectedly due to an error. + inline: true source: openapi: stenographer-openapi.json ReturnChat: @@ -1499,6 +1533,7 @@ types: first) or `DESC` for descending order (reverse-chronological, with the newest records first). This value corresponds to the `ascending_order` query parameter used in the request. + inline: true source: openapi: stenographer-openapi.json ReturnPagedChats: @@ -1562,6 +1597,7 @@ types: - `TOOL`: The function calling mechanism. + inline: true source: openapi: stenographer-openapi.json ReturnChatEventType: @@ -1594,6 +1630,7 @@ types: - `FUNCTION_CALL_RESPONSE`: Contains the tool response. + inline: true source: openapi: stenographer-openapi.json ReturnChatEvent: @@ -1702,6 +1739,7 @@ types: - `ERROR`: The chat ended unexpectedly due to an error. + inline: true source: openapi: stenographer-openapi.json ReturnChatPagedEventsPaginationDirection: @@ -1717,6 +1755,7 @@ types: first) or `DESC` for descending order (reverse-chronological, with the newest records first). This value corresponds to the `ascending_order` query parameter used in the request. + inline: true source: openapi: stenographer-openapi.json ReturnChatPagedEvents: @@ -1830,6 +1869,7 @@ types: - `CANCELED`: The reconstruction job has been canceled. + inline: true source: openapi: stenographer-openapi.json ReturnChatAudioReconstruction: @@ -1929,6 +1969,7 @@ types: first) or `DESC` for descending order (reverse-chronological, with the newest records first). This value corresponds to the `ascending_order` query parameter used in the request. + inline: true source: openapi: stenographer-openapi.json ReturnPagedChatGroups: @@ -1984,6 +2025,7 @@ types: first) or `DESC` for descending order (reverse-chronological, with the newest records first). This value corresponds to the `ascending_order` query parameter used in the request. + inline: true source: openapi: stenographer-openapi.json ReturnChatGroupPagedChats: @@ -2062,6 +2104,7 @@ types: first) or `DESC` for descending order (reverse-chronological, with the newest records first). This value corresponds to the `ascending_order` query parameter used in the request. 
+ inline: true source: openapi: stenographer-openapi.json ReturnChatGroupPagedEvents: @@ -2122,6 +2165,7 @@ types: first) or `DESC` for descending order (reverse-chronological, with the newest records first). This value corresponds to the `ascending_order` query parameter used in the request. + inline: true source: openapi: stenographer-openapi.json ReturnChatGroupPagedAudioReconstructions: diff --git a/.mock/definition/empathic-voice/chat.yml b/.mock/definition/empathic-voice/chat.yml index 47609fbd..ae78c07c 100644 --- a/.mock/definition/empathic-voice/chat.yml +++ b/.mock/definition/empathic-voice/chat.yml @@ -76,6 +76,7 @@ channel: list of all available chat groups. verbose_transcription: type: optional + default: false docs: >- A flag to enable verbose transcription. Set this query parameter to `true` to have unfinalized user transcripts be sent to the client as @@ -86,6 +87,7 @@ channel: denotes whether the message is "interim" or "final." access_token: type: optional + default: '' docs: >- Access token used for authenticating the client. If not provided, an `api_key` must be provided to authenticate. @@ -100,6 +102,7 @@ channel: Guide](/docs/introduction/api-key#authentication-strategies). api_key: type: optional + default: '' docs: >- API key used for authenticating the client. If not provided, an `access_token` must be provided to authenticate. diff --git a/.mock/definition/expression-measurement/batch/__package__.yml b/.mock/definition/expression-measurement/batch/__package__.yml index a4ddace6..78d301e9 100644 --- a/.mock/definition/expression-measurement/batch/__package__.yml +++ b/.mock/definition/expression-measurement/batch/__package__.yml @@ -15,6 +15,7 @@ service: query-parameters: limit: type: optional + default: 50 docs: The maximum number of jobs to include in the response. status: type: optional @@ -43,6 +44,7 @@ service: `timestamp_ms`. timestamp_ms: type: optional + default: 1704319392247 docs: |- Provide a timestamp in milliseconds to filter jobs. 
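
The webhook documentation added to `PostedWebhookSpec` and `ReturnWebhookSpec` above states that the subscribed URL must be a valid https URL and that the server behind it must accept `POST` requests with a JSON payload for the `chat_started` and `chat_ended` events. As a rough illustration only — FastAPI, the `/hume-webhook` route, and the `event_type` field name are assumptions, not part of this diff — a receiver might look like:

```python
# Illustrative webhook receiver; not part of this diff. FastAPI, the route
# path, and the `event_type` field name are assumptions — consult the
# webhooks guide for the actual payload schema.
from fastapi import FastAPI, Request

app = FastAPI()

@app.post("/hume-webhook")  # must be reachable via a valid https URL
async def hume_webhook(request: Request) -> dict:
    payload = await request.json()      # events arrive as a JSON POST body
    event = payload.get("event_type")   # assumed field name
    if event == "chat_started":
        print("chat started:", payload)
    elif event == "chat_ended":
        print("chat ended:", payload)
    return {"received": True}
```
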
diff --git a/.mock/fern.config.json b/.mock/fern.config.json index a7de23b2..631e4bcc 100644 --- a/.mock/fern.config.json +++ b/.mock/fern.config.json @@ -1,4 +1,4 @@ { "organization" : "hume", - "version" : "0.45.1" + "version" : "0.50.6" } \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 8b36333d..738b3467 100644 --- a/poetry.lock +++ b/poetry.lock @@ -566,37 +566,37 @@ toml = ["tomli"] [[package]] name = "debugpy" -version = "1.8.11" +version = "1.8.12" description = "An implementation of the Debug Adapter Protocol for Python" optional = true python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.11-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:2b26fefc4e31ff85593d68b9022e35e8925714a10ab4858fb1b577a8a48cb8cd"}, - {file = "debugpy-1.8.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61bc8b3b265e6949855300e84dc93d02d7a3a637f2aec6d382afd4ceb9120c9f"}, - {file = "debugpy-1.8.11-cp310-cp310-win32.whl", hash = "sha256:c928bbf47f65288574b78518449edaa46c82572d340e2750889bbf8cd92f3737"}, - {file = "debugpy-1.8.11-cp310-cp310-win_amd64.whl", hash = "sha256:8da1db4ca4f22583e834dcabdc7832e56fe16275253ee53ba66627b86e304da1"}, - {file = "debugpy-1.8.11-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:85de8474ad53ad546ff1c7c7c89230db215b9b8a02754d41cb5a76f70d0be296"}, - {file = "debugpy-1.8.11-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ffc382e4afa4aee367bf413f55ed17bd91b191dcaf979890af239dda435f2a1"}, - {file = "debugpy-1.8.11-cp311-cp311-win32.whl", hash = "sha256:40499a9979c55f72f4eb2fc38695419546b62594f8af194b879d2a18439c97a9"}, - {file = "debugpy-1.8.11-cp311-cp311-win_amd64.whl", hash = "sha256:987bce16e86efa86f747d5151c54e91b3c1e36acc03ce1ddb50f9d09d16ded0e"}, - {file = "debugpy-1.8.11-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:84e511a7545d11683d32cdb8f809ef63fc17ea2a00455cc62d0a4dbb4ed1c308"}, - {file = "debugpy-1.8.11-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce291a5aca4985d82875d6779f61375e959208cdf09fcec40001e65fb0a54768"}, - {file = "debugpy-1.8.11-cp312-cp312-win32.whl", hash = "sha256:28e45b3f827d3bf2592f3cf7ae63282e859f3259db44ed2b129093ca0ac7940b"}, - {file = "debugpy-1.8.11-cp312-cp312-win_amd64.whl", hash = "sha256:44b1b8e6253bceada11f714acf4309ffb98bfa9ac55e4fce14f9e5d4484287a1"}, - {file = "debugpy-1.8.11-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:8988f7163e4381b0da7696f37eec7aca19deb02e500245df68a7159739bbd0d3"}, - {file = "debugpy-1.8.11-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c1f6a173d1140e557347419767d2b14ac1c9cd847e0b4c5444c7f3144697e4e"}, - {file = "debugpy-1.8.11-cp313-cp313-win32.whl", hash = "sha256:bb3b15e25891f38da3ca0740271e63ab9db61f41d4d8541745cfc1824252cb28"}, - {file = "debugpy-1.8.11-cp313-cp313-win_amd64.whl", hash = "sha256:d8768edcbeb34da9e11bcb8b5c2e0958d25218df7a6e56adf415ef262cd7b6d1"}, - {file = "debugpy-1.8.11-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:ad7efe588c8f5cf940f40c3de0cd683cc5b76819446abaa50dc0829a30c094db"}, - {file = "debugpy-1.8.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:189058d03a40103a57144752652b3ab08ff02b7595d0ce1f651b9acc3a3a35a0"}, - {file = "debugpy-1.8.11-cp38-cp38-win32.whl", hash = 
"sha256:32db46ba45849daed7ccf3f2e26f7a386867b077f39b2a974bb5c4c2c3b0a280"}, - {file = "debugpy-1.8.11-cp38-cp38-win_amd64.whl", hash = "sha256:116bf8342062246ca749013df4f6ea106f23bc159305843491f64672a55af2e5"}, - {file = "debugpy-1.8.11-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:654130ca6ad5de73d978057eaf9e582244ff72d4574b3e106fb8d3d2a0d32458"}, - {file = "debugpy-1.8.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23dc34c5e03b0212fa3c49a874df2b8b1b8fda95160bd79c01eb3ab51ea8d851"}, - {file = "debugpy-1.8.11-cp39-cp39-win32.whl", hash = "sha256:52d8a3166c9f2815bfae05f386114b0b2d274456980d41f320299a8d9a5615a7"}, - {file = "debugpy-1.8.11-cp39-cp39-win_amd64.whl", hash = "sha256:52c3cf9ecda273a19cc092961ee34eb9ba8687d67ba34cc7b79a521c1c64c4c0"}, - {file = "debugpy-1.8.11-py2.py3-none-any.whl", hash = "sha256:0e22f846f4211383e6a416d04b4c13ed174d24cc5d43f5fd52e7821d0ebc8920"}, - {file = "debugpy-1.8.11.tar.gz", hash = "sha256:6ad2688b69235c43b020e04fecccdf6a96c8943ca9c2fb340b8adc103c655e57"}, + {file = "debugpy-1.8.12-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:a2ba7ffe58efeae5b8fad1165357edfe01464f9aef25e814e891ec690e7dd82a"}, + {file = "debugpy-1.8.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbbd4149c4fc5e7d508ece083e78c17442ee13b0e69bfa6bd63003e486770f45"}, + {file = "debugpy-1.8.12-cp310-cp310-win32.whl", hash = "sha256:b202f591204023b3ce62ff9a47baa555dc00bb092219abf5caf0e3718ac20e7c"}, + {file = "debugpy-1.8.12-cp310-cp310-win_amd64.whl", hash = "sha256:9649eced17a98ce816756ce50433b2dd85dfa7bc92ceb60579d68c053f98dff9"}, + {file = "debugpy-1.8.12-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:36f4829839ef0afdfdd208bb54f4c3d0eea86106d719811681a8627ae2e53dd5"}, + {file = "debugpy-1.8.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a28ed481d530e3138553be60991d2d61103ce6da254e51547b79549675f539b7"}, + {file = "debugpy-1.8.12-cp311-cp311-win32.whl", hash = "sha256:4ad9a94d8f5c9b954e0e3b137cc64ef3f579d0df3c3698fe9c3734ee397e4abb"}, + {file = "debugpy-1.8.12-cp311-cp311-win_amd64.whl", hash = "sha256:4703575b78dd697b294f8c65588dc86874ed787b7348c65da70cfc885efdf1e1"}, + {file = "debugpy-1.8.12-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:7e94b643b19e8feb5215fa508aee531387494bf668b2eca27fa769ea11d9f498"}, + {file = "debugpy-1.8.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:086b32e233e89a2740c1615c2f775c34ae951508b28b308681dbbb87bba97d06"}, + {file = "debugpy-1.8.12-cp312-cp312-win32.whl", hash = "sha256:2ae5df899732a6051b49ea2632a9ea67f929604fd2b036613a9f12bc3163b92d"}, + {file = "debugpy-1.8.12-cp312-cp312-win_amd64.whl", hash = "sha256:39dfbb6fa09f12fae32639e3286112fc35ae976114f1f3d37375f3130a820969"}, + {file = "debugpy-1.8.12-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:696d8ae4dff4cbd06bf6b10d671e088b66669f110c7c4e18a44c43cf75ce966f"}, + {file = "debugpy-1.8.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:898fba72b81a654e74412a67c7e0a81e89723cfe2a3ea6fcd3feaa3395138ca9"}, + {file = "debugpy-1.8.12-cp313-cp313-win32.whl", hash = "sha256:22a11c493c70413a01ed03f01c3c3a2fc4478fc6ee186e340487b2edcd6f4180"}, + {file = "debugpy-1.8.12-cp313-cp313-win_amd64.whl", hash = 
"sha256:fdb3c6d342825ea10b90e43d7f20f01535a72b3a1997850c0c3cefa5c27a4a2c"}, + {file = "debugpy-1.8.12-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:b0232cd42506d0c94f9328aaf0d1d0785f90f87ae72d9759df7e5051be039738"}, + {file = "debugpy-1.8.12-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9af40506a59450f1315168d47a970db1a65aaab5df3833ac389d2899a5d63b3f"}, + {file = "debugpy-1.8.12-cp38-cp38-win32.whl", hash = "sha256:5cc45235fefac57f52680902b7d197fb2f3650112379a6fa9aa1b1c1d3ed3f02"}, + {file = "debugpy-1.8.12-cp38-cp38-win_amd64.whl", hash = "sha256:557cc55b51ab2f3371e238804ffc8510b6ef087673303890f57a24195d096e61"}, + {file = "debugpy-1.8.12-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:b5c6c967d02fee30e157ab5227706f965d5c37679c687b1e7bbc5d9e7128bd41"}, + {file = "debugpy-1.8.12-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a77f422f31f170c4b7e9ca58eae2a6c8e04da54121900651dfa8e66c29901a"}, + {file = "debugpy-1.8.12-cp39-cp39-win32.whl", hash = "sha256:a4042edef80364239f5b7b5764e55fd3ffd40c32cf6753da9bda4ff0ac466018"}, + {file = "debugpy-1.8.12-cp39-cp39-win_amd64.whl", hash = "sha256:f30b03b0f27608a0b26c75f0bb8a880c752c0e0b01090551b9d87c7d783e2069"}, + {file = "debugpy-1.8.12-py2.py3-none-any.whl", hash = "sha256:274b6a2040349b5c9864e475284bce5bb062e63dce368a394b8cc865ae3b00c6"}, + {file = "debugpy-1.8.12.tar.gz", hash = "sha256:646530b04f45c830ceae8e491ca1c9320a2d2f0efea3141487c82130aba70dce"}, ] [[package]] @@ -2357,18 +2357,19 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "referencing" -version = "0.35.1" +version = "0.36.1" description = "JSON Referencing + Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, - {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, + {file = "referencing-0.36.1-py3-none-any.whl", hash = "sha256:363d9c65f080d0d70bc41c721dce3c7f3e77fc09f269cd5c8813da18069a6794"}, + {file = "referencing-0.36.1.tar.gz", hash = "sha256:ca2e6492769e3602957e9b831b94211599d2aade9477f5d44110d2530cf9aade"}, ] [package.dependencies] attrs = ">=22.2.0" rpds-py = ">=0.7.0" +typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""} [[package]] name = "requests" diff --git a/pyproject.toml b/pyproject.toml index 521f5723..5a7e5805 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,6 @@ -[project] -name = "hume" [tool.poetry] name = "hume" -version = "0.7.7" +version = "0.7.8" description = "A Python SDK for Hume AI" readme = "README.md" authors = [] @@ -105,3 +103,6 @@ build-backend = "poetry.core.masonry.api" examples=["jupyter"] microphone=["pydub", "simpleaudio", "sounddevice"] legacy=["pydub"] + +[project] +name = "hume" diff --git a/reference.md b/reference.md index 84e4ab74..5fe728f3 100644 --- a/reference.md +++ b/reference.md @@ -1,6 +1,6 @@ # Reference -## EmpathicVoice Tools -
client.empathic_voice.tools.list_tools(...) +## ExpressionMeasurement Batch +
client.expression_measurement.batch.list_jobs(...)
@@ -12,9 +12,7 @@
-Fetches a paginated list of **Tools**. - -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +Sort and filter jobs.
@@ -34,15 +32,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.tools.list_tools( - page_number=0, - page_size=2, -) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page +client.expression_measurement.batch.list_jobs() ``` @@ -58,11 +48,25 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` +**limit:** `typing.Optional[int]` — The maximum number of jobs to include in the response. + +
+
-Specifies the page number to retrieve, enabling pagination. +
+
-This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**status:** `typing.Optional[typing.Union[Status, typing.Sequence[Status]]]` + +Include only jobs of this status in the response. There are four possible statuses: + +- `QUEUED`: The job has been received and is waiting to be processed. + +- `IN_PROGRESS`: The job is currently being processed. + +- `COMPLETED`: The job has finished processing. + +- `FAILED`: The job encountered an error and could not be completed successfully.
@@ -70,11 +74,19 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` +**when:** `typing.Optional[When]` — Specify whether to include jobs created before or after a given `timestamp_ms`. + +
+
-Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +
+
-For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +**timestamp_ms:** `typing.Optional[int]` + +Provide a timestamp in milliseconds to filter jobs. + + When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided.
@@ -82,7 +94,15 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. +**sort_by:** `typing.Optional[SortBy]` + +Specify which timestamp to sort the jobs by. + +- `created`: Sort jobs by the time of creation, indicated by `created_timestamp_ms`. + +- `started`: Sort jobs by the time processing started, indicated by `started_timestamp_ms`. + +- `ended`: Sort jobs by the time processing ended, indicated by `ended_timestamp_ms`.
@@ -90,7 +110,13 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**name:** `typing.Optional[str]` — Filter to only include tools with name. +**direction:** `typing.Optional[Direction]` + +Specify the order in which to sort the jobs. Defaults to descending order. + +- `asc`: Sort in ascending order (chronological, with the oldest records first). + +- `desc`: Sort in descending order (reverse-chronological, with the newest records first).
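
The filter and sort parameters documented above can be combined in a single call. A minimal sketch follows; the string literals for `status`, `sort_by`, and `direction` come from the enum values listed above, while the `created_before` literal for `when` is an assumption about the SDK and should be checked against your installed version.

```python
# Sketch of combining the documented list_jobs filters.
from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

jobs = client.expression_measurement.batch.list_jobs(
    limit=20,                      # cap the response at 20 jobs
    status="COMPLETED",            # only finished jobs
    when="created_before",         # assumed literal; pairs with timestamp_ms
    timestamp_ms=1704319392247,    # filter jobs created before this time
    sort_by="created",             # sort on created_timestamp_ms
    direction="asc",               # oldest records first
)
print(f"Found {len(jobs)} completed jobs")
```
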
@@ -110,7 +136,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool(...) +
client.expression_measurement.batch.start_inference_job(...)
@@ -122,9 +148,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Tool** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). - -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +Start a new measurement inference job.
@@ -144,12 +168,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.create_tool( - name="get_current_weather", - parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', - version_description="Fetches current weather and uses celsius or fahrenheit based on location of user.", - description="This tool is for getting the current weather.", - fallback_content="Unable to fetch current weather.", +client.expression_measurement.batch.start_inference_job( + urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"], + notify=True, ) ``` @@ -166,7 +187,11 @@ client.empathic_voice.tools.create_tool(
-**name:** `str` — Name applied to all versions of a particular Tool. +**models:** `typing.Optional[Models]` + +Specify the models to use for inference. + +If this field is not explicitly set, then all models will run by default.
@@ -174,11 +199,19 @@ client.empathic_voice.tools.create_tool(
-**parameters:** `str` +**transcription:** `typing.Optional[Transcription]` + +
+
-Stringified JSON defining the parameters used by this version of the Tool. +
+
-These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. +**urls:** `typing.Optional[typing.Sequence[str]]` + +URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. + +If you wish to supply more than 100 URLs, consider providing them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`).
@@ -186,7 +219,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-**version_description:** `typing.Optional[str]` — An optional description of the Tool version. +**text:** `typing.Optional[typing.Sequence[str]]` — Text supplied directly to our Emotional Language and NER models for analysis.
@@ -194,7 +227,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. +**callback_url:** `typing.Optional[str]` — If provided, a `POST` request will be made to the URL with the generated predictions on completion or the error message on failure.
@@ -202,7 +235,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. +**notify:** `typing.Optional[bool]` — Whether to send an email notification to the user upon job completion/failure.
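
Besides `urls`, the parameters above allow submitting raw text and registering a callback. A hedged sketch is shown below; the callback endpoint is a placeholder, and treating the return value as the new job's ID is an assumption based on recent SDK behavior rather than something stated in this diff.

```python
# Sketch of a text-based inference job using the parameters documented above.
from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

job_id = client.expression_measurement.batch.start_inference_job(
    text=["I am so excited to try the new expression measurement API!"],
    callback_url="https://example.com/hume/predictions",  # placeholder URL
    notify=True,  # email notification on completion or failure
)
print(f"Started job {job_id}")
```
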
@@ -222,7 +255,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-
client.empathic_voice.tools.list_tool_versions(...) +
client.expression_measurement.batch.get_job_details(...)
@@ -234,9 +267,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-Fetches a list of a **Tool's** versions. - -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +Get the request details and state of a given job.
@@ -256,8 +287,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.list_tool_versions( - id="00183a3f-79ba-413d-9f3b-609864268bea", +client.expression_measurement.batch.get_job_details( + id="job_id", ) ``` @@ -274,39 +305,7 @@ client.empathic_voice.tools.list_tool_versions(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. +**id:** `str` — The unique identifier for the job.
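
A common pattern is to poll this endpoint until the job leaves the `QUEUED` and `IN_PROGRESS` states. The sketch below assumes the returned job object exposes `state.status`, as in recent SDK models; verify the attribute path against your installed version.

```python
# Polling sketch; `details.state.status` is an assumption about the returned
# model and may differ between SDK versions.
import time

from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

job_id = "job_id"
while True:
    details = client.expression_measurement.batch.get_job_details(id=job_id)
    status = details.state.status  # assumed attribute path
    if status in ("COMPLETED", "FAILED"):
        print(f"Job {job_id} finished with status {status}")
        break
    time.sleep(5)  # wait before polling again
```
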
@@ -326,7 +325,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool_version(...) +
client.expression_measurement.batch.get_job_predictions(...)
@@ -338,9 +337,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Updates a **Tool** by creating a new version of the **Tool**. - -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +Get the JSON predictions of a completed inference job.
@@ -360,12 +357,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.create_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', - version_description="Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", - fallback_content="Unable to fetch current weather.", - description="This tool is for getting the current weather.", +client.expression_measurement.batch.get_job_predictions( + id="job_id", ) ``` @@ -382,43 +375,7 @@ client.empathic_voice.tools.create_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**parameters:** `str` - -Stringified JSON defining the parameters used by this version of the Tool. - -These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. - -
-
- -
-
- -**version_description:** `typing.Optional[str]` — An optional description of the Tool version. - -
-
- -
-
- -**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. - -
-
- -
-
- -**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. +**id:** `str` — The unique identifier for the job.
@@ -438,7 +395,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-
client.empathic_voice.tools.delete_tool(...) +
client.expression_measurement.batch.start_inference_job_from_local_file(...)
@@ -450,9 +407,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-Deletes a **Tool** and its versions. - -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +Start a new batch inference job.
@@ -472,9 +427,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.delete_tool( - id="00183a3f-79ba-413d-9f3b-609864268bea", -) +client.expression_measurement.batch.start_inference_job_from_local_file() ``` @@ -490,7 +443,17 @@ client.empathic_voice.tools.delete_tool(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**file:** `from __future__ import annotations + +typing.List[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**json:** `typing.Optional[InferenceBaseRequest]` — Stringified JSON object containing the inference job configuration.
@@ -510,7 +473,8 @@ client.empathic_voice.tools.delete_tool(
-
client.empathic_voice.tools.update_tool_name(...) +## EmpathicVoice Tools +
client.empathic_voice.tools.list_tools(...)
@@ -522,7 +486,7 @@ client.empathic_voice.tools.delete_tool(
-Updates the name of a **Tool**. +Fetches a paginated list of **Tools**. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -544,10 +508,15 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.update_tool_name( - id="00183a3f-79ba-413d-9f3b-609864268bea", - name="get_current_temperature", +response = client.empathic_voice.tools.list_tools( + page_number=0, + page_size=2, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ```
@@ -563,7 +532,11 @@ client.empathic_voice.tools.update_tool_name(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -571,7 +544,11 @@ client.empathic_voice.tools.update_tool_name(
-**name:** `str` — Name applied to all versions of a particular Tool. +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -579,7 +556,23 @@ client.empathic_voice.tools.update_tool_name(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Filter to only include tools with name. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -591,7 +584,7 @@ client.empathic_voice.tools.update_tool_name(
-
client.empathic_voice.tools.get_tool_version(...) +
client.empathic_voice.tools.create_tool(...)
@@ -603,7 +596,7 @@ client.empathic_voice.tools.update_tool_name(
-Fetches a specified version of a **Tool**. +Creates a **Tool** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -625,9 +618,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.get_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, +client.empathic_voice.tools.create_tool( + name="get_current_weather", + parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', + version_description="Fetches current weather and uses celsius or fahrenheit based on location of user.", + description="This tool is for getting the current weather.", + fallback_content="Unable to fetch current weather.", ) ``` @@ -644,7 +640,7 @@ client.empathic_voice.tools.get_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**name:** `str` — Name applied to all versions of a particular Tool.
@@ -652,13 +648,35 @@ client.empathic_voice.tools.get_tool_version(
-**version:** `int` +**parameters:** `str` -Version number for a Tool. +Stringified JSON defining the parameters used by this version of the Tool. -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. + +
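
Since `parameters` must be a stringified JSON schema, it is often easier to build the schema as a dictionary and serialize it. A small sketch mirroring the `create_tool` example above (the schema content itself is illustrative):

```python
# Build the tool's JSON schema as a dict, then stringify it for `parameters`.
import json

from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

weather_parameters = {
    "type": "object",
    "properties": {
        "location": {
            "type": "string",
            "description": "The city and state, e.g. San Francisco, CA",
        },
        "format": {
            "type": "string",
            "enum": ["celsius", "fahrenheit"],
            "description": "The temperature unit to use.",
        },
    },
    "required": ["location", "format"],
}

client.empathic_voice.tools.create_tool(
    name="get_current_weather",
    parameters=json.dumps(weather_parameters),
    fallback_content="Unable to fetch current weather.",
)
```
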
+
-Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Tool version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. + +
+
+ +
+
+ +**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors.
@@ -678,7 +696,7 @@ Version numbers are integer values representing different iterations of the Tool
-
client.empathic_voice.tools.delete_tool_version(...) +
client.empathic_voice.tools.list_tool_versions(...)
@@ -690,7 +708,7 @@ Version numbers are integer values representing different iterations of the Tool
-Deletes a specified version of a **Tool**. +Fetches a list of a **Tool's** versions. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -712,9 +730,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.delete_tool_version( +client.empathic_voice.tools.list_tool_versions( id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, ) ``` @@ -739,13 +756,31 @@ client.empathic_voice.tools.delete_tool_version(
-**version:** `int` +**page_number:** `typing.Optional[int]` -Version number for a Tool. +Specifies the page number to retrieve, enabling pagination. -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
-Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false.
@@ -765,7 +800,7 @@ Version numbers are integer values representing different iterations of the Tool
-
client.empathic_voice.tools.update_tool_description(...) +
client.empathic_voice.tools.create_tool_version(...)
@@ -777,7 +812,7 @@ Version numbers are integer values representing different iterations of the Tool
-Updates the description of a specified **Tool** version. +Updates a **Tool** by creating a new version of the **Tool**. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -799,10 +834,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.update_tool_description( +client.empathic_voice.tools.create_tool_version( id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, - version_description="Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.", + parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', + version_description="Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", + fallback_content="Unable to fetch current weather.", + description="This tool is for getting the current weather.", ) ``` @@ -827,13 +864,11 @@ client.empathic_voice.tools.update_tool_description(
-**version:** `int` - -Version number for a Tool. +**parameters:** `str` -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +Stringified JSON defining the parameters used by this version of the Tool. -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
@@ -849,6 +884,22 @@ Version numbers are integer values representing different iterations of the Tool
+**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. + +
+
+ +
+
+ +**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -861,8 +912,7 @@ Version numbers are integer values representing different iterations of the Tool
-## EmpathicVoice Prompts -
client.empathic_voice.prompts.list_prompts(...) +
client.empathic_voice.tools.delete_tool(...)
@@ -874,9 +924,9 @@ Version numbers are integer values representing different iterations of the Tool
-Fetches a paginated list of **Prompts**. +Deletes a **Tool** and its versions. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -896,15 +946,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.prompts.list_prompts( - page_number=0, - page_size=2, +client.empathic_voice.tools.delete_tool( + id="00183a3f-79ba-413d-9f3b-609864268bea", ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ``` @@ -920,39 +964,7 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**restrict_to_most_recent:** `typing.Optional[bool]` — Only include the most recent version of each prompt in the list. - -
-
- -
-
- -**name:** `typing.Optional[str]` — Filter to only include prompts with name. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -972,7 +984,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt(...) +
client.empathic_voice.tools.update_tool_name(...)
@@ -984,9 +996,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Prompt** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). +Updates the name of a **Tool**. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -1006,9 +1018,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.create_prompt( - name="Weather Assistant Prompt", - text="You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", +client.empathic_voice.tools.update_tool_name( + id="00183a3f-79ba-413d-9f3b-609864268bea", + name="get_current_temperature", ) ``` @@ -1025,21 +1037,7 @@ client.empathic_voice.prompts.create_prompt(
-**name:** `str` — Name applied to all versions of a particular Prompt. - -
-
- -
-
- -**text:** `str` - -Instructions used to shape EVI’s behavior, responses, and style. - -You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. - -For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -1047,7 +1045,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-**version_description:** `typing.Optional[str]` — An optional description of the Prompt version. +**name:** `str` — Name applied to all versions of a particular Tool.
@@ -1067,7 +1065,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-
client.empathic_voice.prompts.list_prompt_versions(...) +
client.empathic_voice.tools.get_tool_version(...)
@@ -1079,9 +1077,9 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-Fetches a list of a **Prompt's** versions. +Fetches a specified version of a **Tool**. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -1101,8 +1099,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.list_prompt_versions( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", +client.empathic_voice.tools.get_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, ) ``` @@ -1119,19 +1118,7 @@ client.empathic_voice.prompts.list_prompt_versions(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -1139,19 +1126,13 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +**version:** `int` -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
+Version number for a Tool. -
-
+Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -1171,7 +1152,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt_verison(...) +
client.empathic_voice.tools.delete_tool_version(...)
@@ -1183,9 +1164,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Updates a **Prompt** by creating a new version of the **Prompt**. +Deletes a specified version of a **Tool**. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -1205,10 +1186,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.create_prompt_verison( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - text="You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", - version_description="This is an updated version of the Weather Assistant Prompt.", +client.empathic_voice.tools.delete_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, ) ``` @@ -1225,7 +1205,7 @@ client.empathic_voice.prompts.create_prompt_verison(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -1233,21 +1213,13 @@ client.empathic_voice.prompts.create_prompt_verison(
-**text:** `str` - -Instructions used to shape EVI’s behavior, responses, and style for this version of the Prompt. - -You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. +**version:** `int` -For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). - -
-
+Version number for a Tool. -
-
+Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -**version_description:** `typing.Optional[str]` — An optional description of the Prompt version. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -1267,7 +1239,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-
client.empathic_voice.prompts.delete_prompt(...) +
client.empathic_voice.tools.update_tool_description(...)
@@ -1279,9 +1251,9 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-Deletes a **Prompt** and its versions. +Updates the description of a specified **Tool** version. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -1301,8 +1273,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.delete_prompt( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", +client.empathic_voice.tools.update_tool_description( + id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, + version_description="Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.", ) ``` @@ -1319,7 +1293,29 @@ client.empathic_voice.prompts.delete_prompt(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**id:** `str` — Identifier for a Tool. Formatted as a UUID. + +
+
+ +
+
+ +**version:** `int` + +Version number for a Tool. + +Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Tool version.
@@ -1339,7 +1335,8 @@ client.empathic_voice.prompts.delete_prompt(
-
client.empathic_voice.prompts.update_prompt_name(...) +## EmpathicVoice Prompts +
client.empathic_voice.prompts.list_prompts(...)
@@ -1351,7 +1348,7 @@ client.empathic_voice.prompts.delete_prompt(
-Updates the name of a **Prompt**. +Fetches a paginated list of **Prompts**. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1373,10 +1370,15 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.update_prompt_name( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - name="Updated Weather Assistant Prompt Name", +response = client.empathic_voice.prompts.list_prompts( + page_number=0, + page_size=2, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ```
@@ -1392,7 +1394,11 @@ client.empathic_voice.prompts.update_prompt_name(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -1400,7 +1406,27 @@ client.empathic_voice.prompts.update_prompt_name(
-**name:** `str` — Name applied to all versions of a particular Prompt. +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — Only include the most recent version of each prompt in the list. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Filter to only include prompts with name.
@@ -1420,7 +1446,7 @@ client.empathic_voice.prompts.update_prompt_name(
-
client.empathic_voice.prompts.get_prompt_version(...) +
client.empathic_voice.prompts.create_prompt(...)
@@ -1432,7 +1458,7 @@ client.empathic_voice.prompts.update_prompt_name(
-Fetches a specified version of a **Prompt**. +Creates a **Prompt** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1454,9 +1480,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.get_prompt_version( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, +client.empathic_voice.prompts.create_prompt( + name="Weather Assistant Prompt", + text="You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", ) ``` @@ -1473,7 +1499,7 @@ client.empathic_voice.prompts.get_prompt_version(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**name:** `str` — Name applied to all versions of a particular Prompt.
@@ -1481,13 +1507,21 @@ client.empathic_voice.prompts.get_prompt_version(
-**version:** `int` +**text:** `str` -Version number for a Prompt. +Instructions used to shape EVI’s behavior, responses, and style. -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Prompt version.
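A hedged sketch of a common follow-up: creating a Prompt and then attaching it to a Config via `PostedConfigPromptSpec`, which is documented later in this reference. It assumes the returned Prompt object exposes `id` and `version` attributes:

```python
from hume import HumeClient
from hume.empathic_voice import PostedConfigPromptSpec

client = HumeClient(
    api_key="YOUR_API_KEY",
)
prompt = client.empathic_voice.prompts.create_prompt(
    name="Weather Assistant Prompt",
    text="You are an AI weather assistant providing users with accurate and up-to-date weather information.",
)
# Assumption: the created Prompt exposes `id` and `version` attributes.
client.empathic_voice.configs.create_config(
    name="Weather Assistant Config",
    evi_version="2",
    prompt=PostedConfigPromptSpec(
        id=prompt.id,
        version=prompt.version,
    ),
)
```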
@@ -1507,7 +1541,7 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathic_voice.prompts.delete_prompt_version(...) +
client.empathic_voice.prompts.list_prompt_versions(...)
@@ -1519,7 +1553,7 @@ Version numbers are integer values representing different iterations of the Prom
-Deletes a specified version of a **Prompt**. +Fetches a list of a **Prompt's** versions. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1541,9 +1575,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.delete_prompt_version( +client.empathic_voice.prompts.list_prompt_versions( id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=1, ) ``` @@ -1568,13 +1601,31 @@ client.empathic_voice.prompts.delete_prompt_version(
-**version:** `int` +**page_number:** `typing.Optional[int]` -Version number for a Prompt. +Specifies the page number to retrieve, enabling pagination. -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
-Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false.
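For illustration only, a sketch that lists every version of a single Prompt by turning off the default most-recent filter (the UUID reuses the example value above):

```python
from hume import HumeClient

client = HumeClient(
    api_key="YOUR_API_KEY",
)
# Include all versions of the prompt rather than only the latest one.
client.empathic_voice.prompts.list_prompt_versions(
    id="af699d45-2985-42cc-91b9-af9e5da3bac5",
    restrict_to_most_recent=False,
)
```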
@@ -1594,7 +1645,7 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathic_voice.prompts.update_prompt_description(...) +
client.empathic_voice.prompts.create_prompt_verison(...)
@@ -1606,7 +1657,7 @@ Version numbers are integer values representing different iterations of the Prom
-Updates the description of a **Prompt**. +Updates a **Prompt** by creating a new version of the **Prompt**. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1628,10 +1679,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.update_prompt_description( +client.empathic_voice.prompts.create_prompt_verison( id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=1, - version_description="This is an updated version_description.", + text="You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", + version_description="This is an updated version of the Weather Assistant Prompt.", ) ``` @@ -1656,13 +1707,13 @@ client.empathic_voice.prompts.update_prompt_description(
-**version:** `int` +**text:** `str` -Version number for a Prompt. +Instructions used to shape EVI’s behavior, responses, and style for this version of the Prompt. -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting).
@@ -1690,8 +1741,7 @@ Version numbers are integer values representing different iterations of the Prom
-## EmpathicVoice CustomVoices -
client.empathic_voice.custom_voices.list_custom_voices(...) +
client.empathic_voice.prompts.delete_prompt(...)
@@ -1703,9 +1753,9 @@ Version numbers are integer values representing different iterations of the Prom
-Fetches a paginated list of **Custom Voices**. +Deletes a **Prompt** and its versions. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1725,7 +1775,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.list_custom_voices() +client.empathic_voice.prompts.delete_prompt( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", +) ``` @@ -1741,31 +1793,7 @@ client.empathic_voice.custom_voices.list_custom_voices()
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**name:** `typing.Optional[str]` — Filter to only include custom voices with name. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1785,7 +1813,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.custom_voices.create_custom_voice(...) +
client.empathic_voice.prompts.update_prompt_name(...)
@@ -1797,9 +1825,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Custom Voice** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). +Updates the name of a **Prompt**. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1819,9 +1847,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.create_custom_voice( - name="name", - base_voice="ITO", +client.empathic_voice.prompts.update_prompt_name( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + name="Updated Weather Assistant Prompt Name", ) ``` @@ -1838,15 +1866,7 @@ client.empathic_voice.custom_voices.create_custom_voice(
-**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") - -
-
- -
-
- -**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1854,11 +1874,7 @@ client.empathic_voice.custom_voices.create_custom_voice(
-**parameters:** `typing.Optional[PostedCustomVoiceParameters]` - -The specified attributes of a Custom Voice. - -If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. +**name:** `str` — Name applied to all versions of a particular Prompt.
@@ -1878,7 +1894,7 @@ If no parameters are specified then all attributes will be set to their defaults
-
client.empathic_voice.custom_voices.get_custom_voice(...) +
client.empathic_voice.prompts.get_prompt_version(...)
@@ -1890,9 +1906,9 @@ If no parameters are specified then all attributes will be set to their defaults
-Fetches a specific **Custom Voice** by ID. +Fetches a specified version of a **Prompt**. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1912,8 +1928,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.get_custom_voice( - id="id", +client.empathic_voice.prompts.get_prompt_version( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, ) ``` @@ -1930,7 +1947,21 @@ client.empathic_voice.custom_voices.get_custom_voice(
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID. + +
+
+ +
+
+ +**version:** `int` + +Version number for a Prompt. + +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1950,7 +1981,7 @@ client.empathic_voice.custom_voices.get_custom_voice(
-
client.empathic_voice.custom_voices.create_custom_voice_version(...) +
client.empathic_voice.prompts.delete_prompt_version(...)
@@ -1962,9 +1993,9 @@ client.empathic_voice.custom_voices.get_custom_voice(
-Updates a **Custom Voice** by creating a new version of the **Custom Voice**. +Deletes a specified version of a **Prompt**. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1984,10 +2015,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.create_custom_voice_version( - id="id", - name="name", - base_voice="ITO", +client.empathic_voice.prompts.delete_prompt_version( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=1, ) ``` @@ -2004,15 +2034,7 @@ client.empathic_voice.custom_voices.create_custom_voice_version(
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. - -
-
- -
-
- -**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -2020,19 +2042,13 @@ client.empathic_voice.custom_voices.create_custom_voice_version(
-**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. - -
-
- -
-
+**version:** `int` -**parameters:** `typing.Optional[PostedCustomVoiceParameters]` +Version number for a Prompt. -The specified attributes of a Custom Voice. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. -If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -2052,7 +2068,7 @@ If no parameters are specified then all attributes will be set to their defaults
-
client.empathic_voice.custom_voices.delete_custom_voice(...) +
client.empathic_voice.prompts.update_prompt_description(...)
@@ -2064,9 +2080,9 @@ If no parameters are specified then all attributes will be set to their defaults
-Deletes a **Custom Voice** and its versions. +Updates the description of a **Prompt**. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -2086,8 +2102,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.delete_custom_voice( - id="id", +client.empathic_voice.prompts.update_prompt_description( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=1, + version_description="This is an updated version_description.", ) ``` @@ -2104,7 +2122,29 @@ client.empathic_voice.custom_voices.delete_custom_voice(
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID. + +
+
+ +
+
+ +**version:** `int` + +Version number for a Prompt. + +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Prompt version.
@@ -2124,8 +2164,8 @@ client.empathic_voice.custom_voices.delete_custom_voice(
-## EmpathicVoice Configs -
client.empathic_voice.configs.list_configs(...) +## EmpathicVoice CustomVoices +
client.empathic_voice.custom_voices.list_custom_voices(...)
@@ -2137,9 +2177,9 @@ client.empathic_voice.custom_voices.delete_custom_voice(
-Fetches a paginated list of **Configs**. +Fetches a paginated list of **Custom Voices**. -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -2159,10 +2199,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.list_configs( - page_number=0, - page_size=1, -) +client.empathic_voice.custom_voices.list_custom_voices() ``` @@ -2202,15 +2239,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. - -
-
- -
-
- -**name:** `typing.Optional[str]` — Filter to only include configs with this name. +**name:** `typing.Optional[str]` — Filter to only include custom voices with this name.
@@ -2230,7 +2259,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config(...) +
client.empathic_voice.custom_voices.create_custom_voice(...)
@@ -2242,9 +2271,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Config** which can be applied to EVI. +Creates a **Custom Voice** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -2260,47 +2289,13 @@ For more details on configuration options and how to configure EVI, see our [con ```python from hume import HumeClient -from hume.empathic_voice import ( - PostedConfigPromptSpec, - PostedEventMessageSpec, - PostedEventMessageSpecs, - PostedLanguageModel, - PostedVoice, -) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.create_config( - name="Weather Assistant Config", - prompt=PostedConfigPromptSpec( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, - ), - evi_version="2", - voice=PostedVoice( - provider="HUME_AI", - name="SAMPLE VOICE", - ), - language_model=PostedLanguageModel( - model_provider="ANTHROPIC", - model_resource="claude-3-5-sonnet-20240620", - temperature=1.0, - ), - event_messages=PostedEventMessageSpecs( - on_new_chat=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_inactivity_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_max_duration_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - ), +client.empathic_voice.custom_voices.create_custom_voice( + name="name", + base_voice="ITO", ) ``` @@ -2317,7 +2312,7 @@ client.empathic_voice.configs.create_config(
-**evi_version:** `str` — Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE")
@@ -2325,7 +2320,7 @@ client.empathic_voice.configs.create_config(
-**name:** `str` — Name applied to all versions of a particular Config. +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice.
@@ -2333,7 +2328,11 @@ client.empathic_voice.configs.create_config(
-**version_description:** `typing.Optional[str]` — An optional description of the Config version. +**parameters:** `typing.Optional[PostedCustomVoiceParameters]` + +The specified attributes of a Custom Voice. + +If no parameters are specified then all attributes will be set to their defaults, meaning no modifications will be made to the base voice.
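A hedged sketch of creating a Custom Voice with default parameters and then referencing it from a Config. The `CUSTOM_VOICE` provider value and the name-based lookup are assumptions, not taken from the generated text above:

```python
from hume import HumeClient
from hume.empathic_voice import PostedVoice

client = HumeClient(
    api_key="YOUR_API_KEY",
)
# Omitting `parameters` leaves the base voice unmodified (all defaults).
client.empathic_voice.custom_voices.create_custom_voice(
    name="sample voice",  # stored as "SAMPLE VOICE"
    base_voice="ITO",
)
# Assumption: the Custom Voice is referenced by its uppercase name and the
# `CUSTOM_VOICE` provider when building a Config.
client.empathic_voice.configs.create_config(
    name="Custom Voice Config",
    evi_version="2",
    voice=PostedVoice(
        provider="CUSTOM_VOICE",
        name="SAMPLE VOICE",
    ),
)
```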
@@ -2341,79 +2340,71 @@ client.empathic_voice.configs.create_config(
-**prompt:** `typing.Optional[PostedConfigPromptSpec]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
-**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config. -
+
+
client.empathic_voice.custom_voices.get_custom_voice(...)
-**language_model:** `typing.Optional[PostedLanguageModel]` - -The supplemental language model associated with this Config. - -This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. - -
-
+#### 📝 Description
-**ellm_model:** `typing.Optional[PostedEllmModel]` +
+
-The eLLM setup associated with this Config. +Fetches a specific **Custom Voice** by ID. -Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. - +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
- -
-
- -**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config. -
+#### 🔌 Usage +
-**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config. - -
-
-
-**event_messages:** `typing.Optional[PostedEventMessageSpecs]` - +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.custom_voices.get_custom_voice( + id="id", +) + +``` +
+
+#### ⚙️ Parameters +
-**timeouts:** `typing.Optional[PostedTimeoutSpecs]` - -
-
-
-**webhooks:** `typing.Optional[typing.Sequence[typing.Optional[PostedWebhookSpec]]]` — Webhook config specifications for each subscriber. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -2433,7 +2424,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-
client.empathic_voice.configs.list_config_versions(...) +
client.empathic_voice.custom_voices.create_custom_voice_version(...)
@@ -2445,9 +2436,9 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-Fetches a list of a **Config's** versions. +Updates a **Custom Voice** by creating a new version of the **Custom Voice**. -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -2467,8 +2458,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.list_config_versions( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", +client.empathic_voice.custom_voices.create_custom_voice_version( + id="id", + name="name", + base_voice="ITO", ) ``` @@ -2485,7 +2478,7 @@ client.empathic_voice.configs.list_config_versions(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -2493,11 +2486,7 @@ client.empathic_voice.configs.list_config_versions(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE")
@@ -2505,11 +2494,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice.
@@ -2517,7 +2502,11 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false. +**parameters:** `typing.Optional[PostedCustomVoiceParameters]` + +The specified attributes of a Custom Voice. + +If no parameters are specified then all attributes will be set to their defaults, meaning no modifications will be made to the base voice.
@@ -2537,7 +2526,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config_version(...) +
client.empathic_voice.custom_voices.delete_custom_voice(...)
@@ -2549,9 +2538,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Updates a **Config** by creating a new version of the **Config**. +Deletes a **Custom Voice** and its versions. -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -2567,52 +2556,12 @@ For more details on configuration options and how to configure EVI, see our [con ```python from hume import HumeClient -from hume.empathic_voice import ( - PostedConfigPromptSpec, - PostedEllmModel, - PostedEventMessageSpec, - PostedEventMessageSpecs, - PostedLanguageModel, - PostedVoice, -) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.create_config_version( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version_description="This is an updated version of the Weather Assistant Config.", - evi_version="2", - prompt=PostedConfigPromptSpec( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, - ), - voice=PostedVoice( - provider="HUME_AI", - name="ITO", - ), - language_model=PostedLanguageModel( - model_provider="ANTHROPIC", - model_resource="claude-3-5-sonnet-20240620", - temperature=1.0, - ), - ellm_model=PostedEllmModel( - allow_short_responses=True, - ), - event_messages=PostedEventMessageSpecs( - on_new_chat=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_inactivity_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_max_duration_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - ), +client.empathic_voice.custom_voices.delete_custom_voice( + id="id", ) ``` @@ -2629,7 +2578,7 @@ client.empathic_voice.configs.create_config_version(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -2637,87 +2586,72 @@ client.empathic_voice.configs.create_config_version(
-**evi_version:** `str` — The version of the EVI used with this config. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**version_description:** `typing.Optional[str]` — An optional description of the Config version. -
-
-
-**prompt:** `typing.Optional[PostedConfigPromptSpec]` -
+
+
client.empathic_voice.custom_voices.update_custom_voice_name(...)
-**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config version. - -
-
+#### 📝 Description
-**language_model:** `typing.Optional[PostedLanguageModel]` - -The supplemental language model associated with this Config version. - -This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. - -
-
-
-**ellm_model:** `typing.Optional[PostedEllmModel]` +Updates the name of a **Custom Voice**. -The eLLM setup associated with this Config version. - -Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. - +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +
+
+#### 🔌 Usage +
-**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config version. - -
-
-
-**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config version. - +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.custom_voices.update_custom_voice_name( + id="id", + name="name", +) + +```
+ + + +#### ⚙️ Parameters
-**event_messages:** `typing.Optional[PostedEventMessageSpecs]` - -
-
-
-**timeouts:** `typing.Optional[PostedTimeoutSpecs]` +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -2725,7 +2659,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**webhooks:** `typing.Optional[typing.Sequence[typing.Optional[PostedWebhookSpec]]]` — Webhook config specifications for each subscriber. +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE")
@@ -2745,7 +2679,8 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-
client.empathic_voice.configs.delete_config(...) +## EmpathicVoice Configs +
client.empathic_voice.configs.list_configs(...)
@@ -2757,7 +2692,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-Deletes a **Config** and its versions. +Fetches a paginated list of **Configs**. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2779,8 +2714,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.delete_config( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", +client.empathic_voice.configs.list_configs( + page_number=0, + page_size=1, ) ``` @@ -2797,7 +2733,39 @@ client.empathic_voice.configs.delete_config(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Filter to only include configs with this name.
@@ -2817,7 +2785,7 @@ client.empathic_voice.configs.delete_config(
-
client.empathic_voice.configs.update_config_name(...) +
client.empathic_voice.configs.create_config(...)
@@ -2829,7 +2797,7 @@ client.empathic_voice.configs.delete_config(
-Updates the name of a **Config**. +Creates a **Config** which can be applied to EVI. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2847,13 +2815,47 @@ For more details on configuration options and how to configure EVI, see our [con ```python from hume import HumeClient +from hume.empathic_voice import ( + PostedConfigPromptSpec, + PostedEventMessageSpec, + PostedEventMessageSpecs, + PostedLanguageModel, + PostedVoice, +) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.update_config_name( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - name="Updated Weather Assistant Config Name", +client.empathic_voice.configs.create_config( + name="Weather Assistant Config", + prompt=PostedConfigPromptSpec( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, + ), + evi_version="2", + voice=PostedVoice( + provider="HUME_AI", + name="SAMPLE VOICE", + ), + language_model=PostedLanguageModel( + model_provider="ANTHROPIC", + model_resource="claude-3-5-sonnet-20240620", + temperature=1.0, + ), + event_messages=PostedEventMessageSpecs( + on_new_chat=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_inactivity_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_max_duration_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + ), ) ``` @@ -2870,7 +2872,7 @@ client.empathic_voice.configs.update_config_name(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**evi_version:** `str` — Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2).
@@ -2886,72 +2888,71 @@ client.empathic_voice.configs.update_config_name(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**version_description:** `typing.Optional[str]` — An optional description of the Config version.
-
-
+
+
+**prompt:** `typing.Optional[PostedConfigPromptSpec]` +
-
-
client.empathic_voice.configs.get_config_version(...)
-#### 📝 Description +**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config. + +
+
-
-
+**language_model:** `typing.Optional[PostedLanguageModel]` -Fetches a specified version of a **Config**. +The supplemental language model associated with this Config. -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). -
-
+This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. +
-#### 🔌 Usage - -
-
-
-```python -from hume import HumeClient +**ellm_model:** `typing.Optional[PostedEllmModel]` -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.configs.get_config_version( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, -) +The eLLM setup associated with this Config. -``` +Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. +
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config. +
-#### ⚙️ Parameters -
+**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config. + +
+
+
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**event_messages:** `typing.Optional[PostedEventMessageSpecs]`
@@ -2959,13 +2960,15 @@ client.empathic_voice.configs.get_config_version(
-**version:** `int` - -Version number for a Config. +**timeouts:** `typing.Optional[PostedTimeoutSpecs]` + +
+
-Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +**webhooks:** `typing.Optional[typing.Sequence[typing.Optional[PostedWebhookSpec]]]` — Webhook config specifications for each subscriber.
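A minimal sketch of attaching a webhook subscriber when creating a Config. The `PostedWebhookSpec` import path, field names, and event values shown here are assumptions based on the webhook spec types, not on the snippet above:

```python
from hume import HumeClient
from hume.empathic_voice import PostedWebhookSpec

client = HumeClient(
    api_key="YOUR_API_KEY",
)
# Assumption: PostedWebhookSpec accepts an https `url` and a list of `events`.
client.empathic_voice.configs.create_config(
    name="Webhook Config",
    evi_version="2",
    webhooks=[
        PostedWebhookSpec(
            url="https://example.com/hume-webhooks",
            events=["chat_started", "chat_ended"],
        )
    ],
)
```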
@@ -2985,7 +2988,7 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathic_voice.configs.delete_config_version(...) +
client.empathic_voice.configs.list_config_versions(...)
@@ -2997,7 +3000,7 @@ Version numbers are integer values representing different iterations of the Conf
-Deletes a specified version of a **Config**. +Fetches a list of a **Config's** versions. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3019,9 +3022,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.delete_config_version( +client.empathic_voice.configs.list_config_versions( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, ) ``` @@ -3046,13 +3048,31 @@ client.empathic_voice.configs.delete_config_version(
-**version:** `int` +**page_number:** `typing.Optional[int]` -Version number for a Config. +Specifies the page number to retrieve, enabling pagination. -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false.
@@ -3072,7 +3092,7 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathic_voice.configs.update_config_description(...) +
client.empathic_voice.configs.create_config_version(...)
@@ -3084,7 +3104,7 @@ Version numbers are integer values representing different iterations of the Conf
-Updates the description of a **Config**. +Updates a **Config** by creating a new version of the **Config**. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3102,14 +3122,52 @@ For more details on configuration options and how to configure EVI, see our [con ```python from hume import HumeClient +from hume.empathic_voice import ( + PostedConfigPromptSpec, + PostedEllmModel, + PostedEventMessageSpec, + PostedEventMessageSpecs, + PostedLanguageModel, + PostedVoice, +) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.update_config_description( +client.empathic_voice.configs.create_config_version( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, - version_description="This is an updated version_description.", + version_description="This is an updated version of the Weather Assistant Config.", + evi_version="2", + prompt=PostedConfigPromptSpec( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, + ), + voice=PostedVoice( + provider="HUME_AI", + name="ITO", + ), + language_model=PostedLanguageModel( + model_provider="ANTHROPIC", + model_resource="claude-3-5-sonnet-20240620", + temperature=1.0, + ), + ellm_model=PostedEllmModel( + allow_short_responses=True, + ), + event_messages=PostedEventMessageSpecs( + on_new_chat=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_inactivity_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_max_duration_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + ), ) ``` @@ -3134,13 +3192,87 @@ client.empathic_voice.configs.update_config_description(
-**version:** `int` +**evi_version:** `str` — The version of the EVI used with this config. + +
+
-Version number for a Config. +
+
-Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +**version_description:** `typing.Optional[str]` — An optional description of the Config version. + +
+
+ +
+
+ +**prompt:** `typing.Optional[PostedConfigPromptSpec]` + +
+
+ +
+
+ +**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config version. + +
+
+ +
+
+ +**language_model:** `typing.Optional[PostedLanguageModel]` + +The supplemental language model associated with this Config version. + +This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. + +
+
+ +
+
+ +**ellm_model:** `typing.Optional[PostedEllmModel]` + +The eLLM setup associated with this Config version. + +Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config version. + +
+
+ +
+
+ +**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config version. + +
+
+ +
+
+ +**event_messages:** `typing.Optional[PostedEventMessageSpecs]` + +
+
+ +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +**timeouts:** `typing.Optional[PostedTimeoutSpecs]`
@@ -3148,7 +3280,7 @@ Version numbers are integer values representing different iterations of the Conf
-**version_description:** `typing.Optional[str]` — An optional description of the Config version. +**webhooks:** `typing.Optional[typing.Sequence[typing.Optional[PostedWebhookSpec]]]` — Webhook config specifications for each subscriber.
@@ -3168,8 +3300,7 @@ Version numbers are integer values representing different iterations of the Conf
-## EmpathicVoice Chats -
client.empathic_voice.chats.list_chats(...) +
client.empathic_voice.configs.delete_config(...)
@@ -3181,7 +3312,9 @@ Version numbers are integer values representing different iterations of the Conf
-Fetches a paginated list of **Chats**. +Deletes a **Config** and its versions. + +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3201,16 +3334,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.chats.list_chats( - page_number=0, - page_size=1, - ascending_order=True, +client.empathic_voice.configs.delete_config( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ``` @@ -3226,39 +3352,7 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. - -
-
- -
-
- -**config_id:** `typing.Optional[str]` — Filter to only include chats that used this config. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -3278,7 +3372,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chats.list_chat_events(...) +
client.empathic_voice.configs.update_config_name(...)
@@ -3290,7 +3384,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Fetches a paginated list of **Chat** events. +Updates the name of a **Config**. + +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3310,17 +3406,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.chats.list_chat_events( - id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", - page_number=0, - page_size=3, - ascending_order=True, +client.empathic_voice.configs.update_config_name( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + name="Updated Weather Assistant Config Name", ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ``` @@ -3336,31 +3425,7 @@ for page in response.iter_pages():
-**id:** `str` — Identifier for a Chat. Formatted as a UUID. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -3368,7 +3433,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**name:** `str` — Name applied to all versions of a particular Config.
@@ -3388,7 +3453,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-
client.empathic_voice.chats.get_audio(...) +
client.empathic_voice.configs.get_config_version(...)
@@ -3400,7 +3465,9 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-Fetches the audio of a previous **Chat**. For more details, see our guide on audio reconstruction [here](/docs/empathic-voice-interface-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi). +Fetches a specified version of a **Config**. + +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3420,8 +3487,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chats.get_audio( - id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", +client.empathic_voice.configs.get_config_version( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, ) ``` @@ -3438,7 +3506,21 @@ client.empathic_voice.chats.get_audio(
-**id:** `str` — Identifier for a chat. Formatted as a UUID. +**id:** `str` — Identifier for a Config. Formatted as a UUID. + +
+
+ +
+
+ +**version:** `int` + +Version number for a Config. + +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -3458,8 +3540,7 @@ client.empathic_voice.chats.get_audio(
-## EmpathicVoice ChatGroups -
client.empathic_voice.chat_groups.list_chat_groups(...) +
client.empathic_voice.configs.delete_config_version(...)
@@ -3471,7 +3552,9 @@ client.empathic_voice.chats.get_audio(
-Fetches a paginated list of **Chat Groups**. +Deletes a specified version of a **Config**. + +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3491,11 +3574,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.list_chat_groups( - page_number=0, - page_size=1, - ascending_order=True, - config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", +client.empathic_voice.configs.delete_config_version( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, ) ``` @@ -3512,23 +3593,7 @@ client.empathic_voice.chat_groups.list_chat_groups(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -3536,19 +3601,13 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. - -
-
- -
-
+**version:** `int` -**config_id:** `typing.Optional[str]` +Version number for a Config. -The unique identifier for an EVI configuration. +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. -Filter Chat Groups to only include Chats that used this `config_id` in their most recent Chat. +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -3568,7 +3627,7 @@ Filter Chat Groups to only include Chats that used this `config_id` in their mos
-
client.empathic_voice.chat_groups.get_chat_group(...) +
client.empathic_voice.configs.update_config_description(...)
@@ -3580,7 +3639,9 @@ Filter Chat Groups to only include Chats that used this `config_id` in their mos
-Fetches a **ChatGroup** by ID, including a paginated list of **Chats** associated with the **ChatGroup**. +Updates the description of a **Config**. + +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3600,11 +3661,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.get_chat_group( - id="697056f0-6c7e-487d-9bd8-9c19df79f05f", - page_number=0, - page_size=1, - ascending_order=True, +client.empathic_voice.configs.update_config_description( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, + version_description="This is an updated version_description.", ) ``` @@ -3621,7 +3681,7 @@ client.empathic_voice.chat_groups.get_chat_group(
-**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -3629,23 +3689,13 @@ client.empathic_voice.chat_groups.get_chat_group(
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
+**version:** `int` -**page_number:** `typing.Optional[int]` +Version number for a Config. -Specifies the page number to retrieve, enabling pagination. +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -3653,7 +3703,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**version_description:** `typing.Optional[str]` — An optional description of the Config version.
@@ -3673,7 +3723,8 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-
client.empathic_voice.chat_groups.list_chat_group_events(...) +## EmpathicVoice Chats +
client.empathic_voice.chats.list_chats(...)
@@ -3685,7 +3736,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-Fetches a paginated list of **Chat** events associated with a **Chat Group**. +Fetches a paginated list of **Chats**.
@@ -3705,12 +3756,16 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.list_chat_group_events( - id="697056f0-6c7e-487d-9bd8-9c19df79f05f", +response = client.empathic_voice.chats.list_chats( page_number=0, - page_size=3, + page_size=1, ascending_order=True, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ``` @@ -3726,7 +3781,11 @@ client.empathic_voice.chat_groups.list_chat_group_events(
-**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3746,11 +3805,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3758,7 +3813,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**config_id:** `typing.Optional[str]` — Filter to only include chats that used this config.
@@ -3778,7 +3833,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-
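For a self-contained script, the paginated response can simply be iterated; a minimal sketch that collects the Chats returned for one (placeholder) config, newest first:

```python
from hume import HumeClient

client = HumeClient(
    api_key="YOUR_API_KEY",
)

# Iterating the response yields the returned Chat records; the config_id
# below is a placeholder.
chats = []
response = client.empathic_voice.chats.list_chats(
    page_size=50,
    ascending_order=False,
    config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3",
)
for chat in response:
    chats.append(chat)

print(f"Fetched {len(chats)} chats")
```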
client.empathic_voice.chat_groups.get_audio(...) +
client.empathic_voice.chats.list_chat_events(...)
@@ -3790,7 +3845,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-Fetches a paginated list of audio for each **Chat** within the specified **Chat Group**. For more details, see our guide on audio reconstruction [here](/docs/empathic-voice-interface-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi). +Fetches a paginated list of **Chat** events.
@@ -3810,12 +3865,17 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.get_audio( - id="369846cf-6ad5-404d-905e-a8acb5cdfc78", +response = client.empathic_voice.chats.list_chat_events( + id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", page_number=0, - page_size=10, + page_size=3, ascending_order=True, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ``` @@ -3831,7 +3891,7 @@ client.empathic_voice.chat_groups.get_audio(
-**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. +**id:** `str` — Identifier for a Chat. Formatted as a UUID.
@@ -3839,7 +3899,7 @@ client.empathic_voice.chat_groups.get_audio(
-**page_number:** `typing.Optional[int]` +**page_size:** `typing.Optional[int]` Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. @@ -3851,7 +3911,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**page_size:** `typing.Optional[int]` +**page_number:** `typing.Optional[int]` Specifies the page number to retrieve, enabling pagination. @@ -3883,8 +3943,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
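The events endpoint returns the same style of paginated response; a short sketch that counts the events recorded for one Chat (placeholder ID), oldest first:

```python
from hume import HumeClient

client = HumeClient(
    api_key="YOUR_API_KEY",
)

# Count the events recorded for a single Chat (placeholder ID).
response = client.empathic_voice.chats.list_chat_events(
    id="470a49f6-1dec-4afe-8b61-035d3b2d63b0",
    page_size=100,
    ascending_order=True,
)
event_count = sum(1 for _ in response)
print(f"Chat has {event_count} events")
```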
-## ExpressionMeasurement Batch -
client.expression_measurement.batch.list_jobs(...) +
client.empathic_voice.chats.get_audio(...)
@@ -3896,7 +3955,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-Sort and filter jobs. +Fetches the audio of a previous **Chat**. For more details, see our guide on audio reconstruction [here](/docs/empathic-voice-interface-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi).
@@ -3916,7 +3975,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.list_jobs() +client.empathic_voice.chats.get_audio( + id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", +) ``` @@ -3932,7 +3993,7 @@ client.expression_measurement.batch.list_jobs()
-**limit:** `typing.Optional[int]` — The maximum number of jobs to include in the response.
+**id:** `str` — Identifier for a Chat. Formatted as a UUID.
@@ -3940,37 +4001,77 @@ client.expression_measurement.batch.list_jobs()
-**status:** `typing.Optional[typing.Union[Status, typing.Sequence[Status]]]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
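A minimal sketch of the audio endpoint above; the shape of the returned reconstruction object is covered in the linked audio reconstruction guide, so the sketch only prints it (the Chat ID is a placeholder):

```python
from hume import HumeClient

client = HumeClient(
    api_key="YOUR_API_KEY",
)

# Request the audio reconstruction metadata for a single Chat (placeholder ID).
reconstruction = client.empathic_voice.chats.get_audio(
    id="470a49f6-1dec-4afe-8b61-035d3b2d63b0",
)
print(reconstruction)
```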
+
+ +
-Include only jobs of this status in the response. There are four possible statuses: -- `QUEUED`: The job has been received and is waiting to be processed. + + +
-- `IN_PROGRESS`: The job is currently being processed. +## EmpathicVoice ChatGroups +
client.empathic_voice.chat_groups.list_chat_groups(...) +
+
-- `COMPLETED`: The job has finished processing. +#### 📝 Description -- `FAILED`: The job encountered an error and could not be completed successfully. - +
+
+ +
+
+ +Fetches a paginated list of **Chat Groups**. +
+
+#### 🔌 Usage +
-**when:** `typing.Optional[When]` — Specify whether to include jobs created before or after a given `timestamp_ms`. - +
+
+ +```python +from hume import HumeClient + +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.chat_groups.list_chat_groups( + page_number=0, + page_size=1, + ascending_order=True, + config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", +) + +``` +
+
+#### ⚙️ Parameters +
-**timestamp_ms:** `typing.Optional[int]` +
+
-Provide a timestamp in milliseconds to filter jobs. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. -When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3978,15 +4079,19 @@ When combined with the `when` parameter, you can filter jobs before or after the
-**sort_by:** `typing.Optional[SortBy]` +**page_size:** `typing.Optional[int]` -Specify which timestamp to sort the jobs by. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -- `created`: Sort jobs by the time of creation, indicated by `created_timestamp_ms`. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
-- `started`: Sort jobs by the time processing started, indicated by `started_timestamp_ms`. +
+
-- `ended`: Sort jobs by the time processing ended, indicated by `ended_timestamp_ms`. +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3994,13 +4099,11 @@ Specify which timestamp to sort the jobs by.
-**direction:** `typing.Optional[Direction]` - -Specify the order in which to sort the jobs. Defaults to descending order. +**config_id:** `typing.Optional[str]` -- `asc`: Sort in ascending order (chronological, with the oldest records first). +The unique identifier for an EVI configuration. -- `desc`: Sort in descending order (reverse-chronological, with the newest records first). +Filter Chat Groups to only include Chats that used this `config_id` in their most recent Chat.
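A short sketch of the `config_id` filter described above — list only the Chat Groups whose most recent Chat used one (placeholder) configuration, newest first:

```python
from hume import HumeClient

client = HumeClient(
    api_key="YOUR_API_KEY",
)

# Restrict the listing to Chat Groups whose most recent Chat used this
# (placeholder) EVI configuration, newest groups first.
chat_groups = client.empathic_voice.chat_groups.list_chat_groups(
    page_number=0,
    page_size=25,
    ascending_order=False,
    config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3",
)
print(chat_groups)
```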
@@ -4020,7 +4123,7 @@ Specify the order in which to sort the jobs. Defaults to descending order.
-
client.expression_measurement.batch.start_inference_job(...) +
client.empathic_voice.chat_groups.get_chat_group(...)
@@ -4032,7 +4135,7 @@ Specify the order in which to sort the jobs. Defaults to descending order.
-Start a new measurement inference job. +Fetches a **ChatGroup** by ID, including a paginated list of **Chats** associated with the **ChatGroup**.
@@ -4052,9 +4155,11 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.start_inference_job( - urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"], - notify=True, +client.empathic_voice.chat_groups.get_chat_group( + id="697056f0-6c7e-487d-9bd8-9c19df79f05f", + page_number=0, + page_size=1, + ascending_order=True, ) ``` @@ -4071,19 +4176,7 @@ client.expression_measurement.batch.start_inference_job(
-**models:** `typing.Optional[Models]` - -Specify the models to use for inference. - -If this field is not explicitly set, then all models will run by default. - -
-
- -
-
- -**transcription:** `typing.Optional[Transcription]` +**id:** `str` — Identifier for a Chat Group. Formatted as a UUID.
@@ -4091,11 +4184,11 @@ If this field is not explicitly set, then all models will run by default.
-**urls:** `typing.Optional[typing.Sequence[str]]` +**page_size:** `typing.Optional[int]` -URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -If you wish to supply more than 100 URLs, consider providing them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -4103,15 +4196,11 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**text:** `typing.Optional[typing.Sequence[str]]` — Text supplied directly to our Emotional Language and NER models for analysis. - -
-
+**page_number:** `typing.Optional[int]` -
-
+Specifies the page number to retrieve, enabling pagination. -**callback_url:** `typing.Optional[str]` — If provided, a `POST` request will be made to the URL with the generated predictions on completion or the error message on failure. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -4119,7 +4208,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**notify:** `typing.Optional[bool]` — Whether to send an email notification to the user upon job completion/failure. +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -4139,7 +4228,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-
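To page through a group's Chats newest-first, set `ascending_order` to false; a minimal sketch with placeholder values:

```python
from hume import HumeClient

client = HumeClient(
    api_key="YOUR_API_KEY",
)

# Fetch a Chat Group (placeholder ID) with its most recent Chats on the
# first page.
chat_group = client.empathic_voice.chat_groups.get_chat_group(
    id="697056f0-6c7e-487d-9bd8-9c19df79f05f",
    page_number=0,
    page_size=25,
    ascending_order=False,
)
print(chat_group)
```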
client.expression_measurement.batch.get_job_details(...) +
client.empathic_voice.chat_groups.list_chat_group_events(...)
@@ -4151,7 +4240,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-Get the request details and state of a given job. +Fetches a paginated list of **Chat** events associated with a **Chat Group**.
@@ -4171,8 +4260,11 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.get_job_details( - id="job_id", +client.empathic_voice.chat_groups.list_chat_group_events( + id="697056f0-6c7e-487d-9bd8-9c19df79f05f", + page_number=0, + page_size=3, + ascending_order=True, ) ``` @@ -4189,77 +4281,39 @@ client.expression_measurement.batch.get_job_details(
-**id:** `str` — The unique identifier for the job. - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**id:** `str` — Identifier for a Chat Group. Formatted as a UUID.
- -
- - - - -
-
client.expression_measurement.batch.get_job_predictions(...)
-#### 📝 Description - -
-
+**page_size:** `typing.Optional[int]` -
-
+Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -Get the JSON predictions of a completed inference job. -
-
+For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +
-#### 🔌 Usage -
-
-
- -```python -from hume import HumeClient +**page_number:** `typing.Optional[int]` -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.expression_measurement.batch.get_job_predictions( - id="job_id", -) +Specifies the page number to retrieve, enabling pagination. -``` -
-
+This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +
-#### ⚙️ Parameters -
-
-
- -**id:** `str` — The unique identifier for the job. +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -4279,7 +4333,7 @@ client.expression_measurement.batch.get_job_predictions(
-
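A comparable sketch for Chat Group events — request a larger page of the oldest events first (placeholder ID):

```python
from hume import HumeClient

client = HumeClient(
    api_key="YOUR_API_KEY",
)

# First page of events across all Chats in a Chat Group (placeholder ID),
# oldest first.
events_page = client.empathic_voice.chat_groups.list_chat_group_events(
    id="697056f0-6c7e-487d-9bd8-9c19df79f05f",
    page_number=0,
    page_size=100,
    ascending_order=True,
)
print(events_page)
```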
client.expression_measurement.batch.start_inference_job_from_local_file(...) +
client.empathic_voice.chat_groups.get_audio(...)
@@ -4291,7 +4345,7 @@ client.expression_measurement.batch.get_job_predictions(
-Start a new batch inference job. +Fetches a paginated list of audio for each **Chat** within the specified **Chat Group**. For more details, see our guide on audio reconstruction [here](/docs/empathic-voice-interface-evi/faq#can-i-access-the-audio-of-previous-conversations-with-evi).
@@ -4311,7 +4365,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.start_inference_job_from_local_file() +client.empathic_voice.chat_groups.get_audio( + id="369846cf-6ad5-404d-905e-a8acb5cdfc78", + page_number=0, + page_size=10, + ascending_order=True, +) ``` @@ -4327,9 +4386,19 @@ client.expression_measurement.batch.start_inference_job_from_local_file()
-**file:** `from __future__ import annotations +**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. + +
+
-typing.List[core.File]` — See core.File for more documentation +
+
+**page_size:** `typing.Optional[int]`
+
+Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive.
+
+For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -4337,7 +4406,19 @@ typing.List[core.File]` — See core.File for more documentation
-**json:** `typing.Optional[InferenceBaseRequest]` — Stringified JSON object containing the inference job configuration.
+**page_number:** `typing.Optional[int]`
+
+Specifies the page number to retrieve, enabling pagination.
+
+This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
+
+
+
+ +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
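And a minimal sketch for the Chat Group audio endpoint above, printing the returned page of reconstructions (placeholder ID):

```python
from hume import HumeClient

client = HumeClient(
    api_key="YOUR_API_KEY",
)

# First page of audio reconstructions for the Chats within a Chat Group
# (placeholder ID).
audio_page = client.empathic_voice.chat_groups.get_audio(
    id="369846cf-6ad5-404d-905e-a8acb5cdfc78",
    page_number=0,
    page_size=10,
    ascending_order=True,
)
print(audio_page)
```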
diff --git a/src/hume/base_client.py b/src/hume/base_client.py index 889ec357..d293f18b 100644 --- a/src/hume/base_client.py +++ b/src/hume/base_client.py @@ -4,11 +4,11 @@ from .environment import HumeClientEnvironment import httpx from .core.client_wrapper import SyncClientWrapper -from .empathic_voice.client import EmpathicVoiceClient from .expression_measurement.client import ExpressionMeasurementClient +from .empathic_voice.client import EmpathicVoiceClient from .core.client_wrapper import AsyncClientWrapper -from .empathic_voice.client import AsyncEmpathicVoiceClient from .expression_measurement.client import AsyncExpressionMeasurementClient +from .empathic_voice.client import AsyncEmpathicVoiceClient class BaseHumeClient: @@ -69,8 +69,8 @@ def __init__( else httpx.Client(timeout=_defaulted_timeout), timeout=_defaulted_timeout, ) - self.empathic_voice = EmpathicVoiceClient(client_wrapper=self._client_wrapper) self.expression_measurement = ExpressionMeasurementClient(client_wrapper=self._client_wrapper) + self.empathic_voice = EmpathicVoiceClient(client_wrapper=self._client_wrapper) class AsyncBaseHumeClient: @@ -131,8 +131,8 @@ def __init__( else httpx.AsyncClient(timeout=_defaulted_timeout), timeout=_defaulted_timeout, ) - self.empathic_voice = AsyncEmpathicVoiceClient(client_wrapper=self._client_wrapper) self.expression_measurement = AsyncExpressionMeasurementClient(client_wrapper=self._client_wrapper) + self.empathic_voice = AsyncEmpathicVoiceClient(client_wrapper=self._client_wrapper) def _get_base_url(*, base_url: typing.Optional[str] = None, environment: HumeClientEnvironment) -> str: diff --git a/src/hume/empathic_voice/custom_voices/client.py b/src/hume/empathic_voice/custom_voices/client.py index 6ac8090a..3829b322 100644 --- a/src/hume/empathic_voice/custom_voices/client.py +++ b/src/hume/empathic_voice/custom_voices/client.py @@ -412,6 +412,18 @@ def update_custom_voice_name( ------- str Success + + Examples + -------- + from hume import HumeClient + + client = HumeClient( + api_key="YOUR_API_KEY", + ) + client.empathic_voice.custom_voices.update_custom_voice_name( + id="id", + name="name", + ) """ _response = self._client_wrapper.httpx_client.request( f"v0/evi/custom_voices/{jsonable_encoder(id)}", @@ -873,6 +885,26 @@ async def update_custom_voice_name( ------- str Success + + Examples + -------- + import asyncio + + from hume import AsyncHumeClient + + client = AsyncHumeClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.empathic_voice.custom_voices.update_custom_voice_name( + id="id", + name="name", + ) + + + asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( f"v0/evi/custom_voices/{jsonable_encoder(id)}", diff --git a/src/hume/empathic_voice/types/context.py b/src/hume/empathic_voice/types/context.py index 0cc7c4b3..01046b1e 100644 --- a/src/hume/empathic_voice/types/context.py +++ b/src/hume/empathic_voice/types/context.py @@ -18,9 +18,9 @@ class Context(UniversalBaseModel): - **Temporary**: The context is appended only to the next user message. - - **Editable**: The original context is updated to reflect the new context. + - **Editable**: The original context is updated to reflect the new context. - If the type is not specified, it will default to `temporary`. + If the type is not specified, it will default to `temporary`. 
""" text: str = pydantic.Field() diff --git a/src/hume/empathic_voice/types/posted_custom_voice.py b/src/hume/empathic_voice/types/posted_custom_voice.py index 171f0a76..306ac370 100644 --- a/src/hume/empathic_voice/types/posted_custom_voice.py +++ b/src/hume/empathic_voice/types/posted_custom_voice.py @@ -14,7 +14,7 @@ class PostedCustomVoice(UniversalBaseModel): If a Custom Voice specification is not provided then the [name](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.name) of a base voice or previously created Custom Voice must be provided. - See our [Voices guide](/docs/empathic-voice-interface-evi/voices) for a tutorial on how to craft a Custom Voice. + See our [Voices guide](/docs/empathic-voice-interface-evi/voices) for a tutorial on how to craft a Custom Voice. """ name: str = pydantic.Field() @@ -34,7 +34,7 @@ class PostedCustomVoice(UniversalBaseModel): parameters: typing.Optional[PostedCustomVoiceParameters] = pydantic.Field(default=None) """ - The specified attributes of a Custom Voice. + The specified attributes of a Custom Voice. If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. """ diff --git a/src/hume/empathic_voice/types/posted_webhook_spec.py b/src/hume/empathic_voice/types/posted_webhook_spec.py index f03c5d3b..322fb873 100644 --- a/src/hume/empathic_voice/types/posted_webhook_spec.py +++ b/src/hume/empathic_voice/types/posted_webhook_spec.py @@ -14,12 +14,14 @@ class PostedWebhookSpec(UniversalBaseModel): url: str = pydantic.Field() """ - URL to send the webhook to + The URL where event payloads will be sent. This must be a valid https URL to ensure secure communication. The server at this URL must accept POST requests with a JSON payload. """ events: typing.List[PostedWebhookEventType] = pydantic.Field() """ - Events this URL is subscribed to + The list of events the specified URL is subscribed to. + + See our [webhooks guide](/docs/empathic-voice-interface-evi/configuration#supported-events) for more information on supported events. """ if IS_PYDANTIC_V2: diff --git a/src/hume/empathic_voice/types/return_webhook_spec.py b/src/hume/empathic_voice/types/return_webhook_spec.py index 4839b49d..2f39590b 100644 --- a/src/hume/empathic_voice/types/return_webhook_spec.py +++ b/src/hume/empathic_voice/types/return_webhook_spec.py @@ -14,12 +14,14 @@ class ReturnWebhookSpec(UniversalBaseModel): url: str = pydantic.Field() """ - Webhook URL to send the event updates to + The URL where event payloads will be sent. This must be a valid https URL to ensure secure communication. The server at this URL must accept POST requests with a JSON payload. """ events: typing.List[ReturnWebhookEventType] = pydantic.Field() """ - Events this URL is subscribed to + The list of events the specified URL is subscribed to. + + See our [webhooks guide](/docs/empathic-voice-interface-evi/configuration#supported-events) for more information on supported events. """ if IS_PYDANTIC_V2: diff --git a/src/hume/expression_measurement/batch/client.py b/src/hume/expression_measurement/batch/client.py index fbd90b23..bddf1fa2 100644 --- a/src/hume/expression_measurement/batch/client.py +++ b/src/hume/expression_measurement/batch/client.py @@ -65,7 +65,7 @@ def list_jobs( timestamp_ms : typing.Optional[int] Provide a timestamp in milliseconds to filter jobs. - When combined with the `when` parameter, you can filter jobs before or after the given timestamp. 
Defaults to the current Unix timestamp if one is not provided. + When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. sort_by : typing.Optional[SortBy] Specify which timestamp to sort the jobs by. @@ -449,7 +449,7 @@ async def list_jobs( timestamp_ms : typing.Optional[int] Provide a timestamp in milliseconds to filter jobs. - When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. + When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. sort_by : typing.Optional[SortBy] Specify which timestamp to sort the jobs by. diff --git a/src/hume/expression_measurement/batch/types/transcription.py b/src/hume/expression_measurement/batch/types/transcription.py index a94c7553..817e8b3a 100644 --- a/src/hume/expression_measurement/batch/types/transcription.py +++ b/src/hume/expression_measurement/batch/types/transcription.py @@ -19,7 +19,6 @@ class Transcription(UniversalBaseModel): By default, we use an automated language detection method for our Speech Prosody, Language, and NER models. However, if you know what language is being spoken in your media samples, you can specify it via its BCP-47 tag and potentially obtain more accurate results. You can specify any of the following languages: - - Chinese: `zh` - Danish: `da` - Dutch: `nl` diff --git a/tests/empathic_voice/test_custom_voices.py b/tests/empathic_voice/test_custom_voices.py index 58fa78c5..dfd5d91e 100644 --- a/tests/empathic_voice/test_custom_voices.py +++ b/tests/empathic_voice/test_custom_voices.py @@ -214,3 +214,13 @@ async def test_delete_custom_voice(client: HumeClient, async_client: AsyncHumeCl await async_client.empathic_voice.custom_voices.delete_custom_voice(id="id") # type: ignore[func-returns-value] is None ) + + +async def test_update_custom_voice_name(client: HumeClient, async_client: AsyncHumeClient) -> None: + expected_response: typing.Any = "string" + expected_types: typing.Any = None + response = client.empathic_voice.custom_voices.update_custom_voice_name(id="id", name="name") + validate_response(response, expected_response, expected_types) + + async_response = await async_client.empathic_voice.custom_voices.update_custom_voice_name(id="id", name="name") + validate_response(async_response, expected_response, expected_types)
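The webhook docstring changes above spell out the URL contract (a valid https URL whose server accepts POST requests with a JSON payload); a minimal sketch of building the spec, assuming the model can be imported from its module path and that `chat_started` and `chat_ended` are the supported event names:

```python
from hume.empathic_voice.types.posted_webhook_spec import PostedWebhookSpec

# The URL must be https and must accept POST requests with a JSON payload.
# The event names below are assumed; see the webhooks guide for the full list.
spec = PostedWebhookSpec(
    url="https://example.com/hume/webhooks",
    events=["chat_started", "chat_ended"],
)
print(spec)
```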