apatch(client, mode=Mode.TOOLS)

No longer necessary, use patch instead.

Patches the client.chat.completions.create method to enable the following features:

- response_model parameter to parse the response from OpenAI's API
- max_retries parameter to retry the function if the response is not valid
- validation_context parameter to validate the response using the pydantic model
- strict parameter to use strict json parsing

instructor/patch.py
dump_message(message)

Dumps a message to a dict, to be returned to the OpenAI API. Workaround for an issue with the OpenAI API, where the tool_calls field isn't allowed to be present in requests if it isn't used.

instructor/patch.py
handle_response_model(response_model, mode=Mode.TOOLS, **kwargs)

Prepares the response model type hint and returns the response_model along with the modified kwargs needed to use the response_model parameter with the patch function.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
response_model | T | The response model to use for parsing the response | required |
mode | Mode | The openai completion mode. Defaults to Mode.TOOLS. | TOOLS |
Raises:
Type | Description |
---|---|
NotImplementedError | When using stream=True with a non-iterable response_model |
ValueError | When using an invalid patch mode |
Returns:
Type | Description |
---|---|
Union[Type[OpenAISchema], dict] | The response model to use for parsing the response |
instructor/patch.py
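A hypothetical sketch of calling this helper directly; the User model and messages below are illustrative, and only the signature and return shape documented above are assumed:

```python
from instructor import Mode
from instructor.patch import handle_response_model
from pydantic import BaseModel

class User(BaseModel):
    name: str
    age: int

# Returns the (possibly wrapped) response model plus the kwargs that the
# patched create call would forward to OpenAI, e.g. a tools/functions entry
# built from the User schema alongside the original messages.
response_model, kwargs = handle_response_model(
    User,
    mode=Mode.TOOLS,
    messages=[{"role": "user", "content": "Extract: Jason is 25 years old"}],
)
```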
is_async(func)

Returns true if the callable is async, accounting for wrapped callables.
instructor/patch.py
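A small sketch of the behavior, assuming is_async follows functools-style __wrapped__ attributes as described:

```python
import functools

from instructor.patch import is_async

async def fetch():
    ...

@functools.wraps(fetch)
def wrapped(*args, **kwargs):
    return fetch(*args, **kwargs)

assert is_async(fetch)    # plain coroutine function
assert is_async(wrapped)  # detected through the __wrapped__ attribute
```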
patch(client=None, create=None, mode=Mode.TOOLS)

Patches the client.chat.completions.create method to enable the following features:

- response_model parameter to parse the response from OpenAI's API
- max_retries parameter to retry the function if the response is not valid
- validation_context parameter to validate the response using the pydantic model
- strict parameter to use strict json parsing

instructor/patch.py
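A minimal usage sketch of the patched client; the model name and prompt are illustrative:

```python
import instructor
from openai import OpenAI
from pydantic import BaseModel

class UserDetail(BaseModel):
    name: str
    age: int

# patch() adds response_model, max_retries, validation_context and strict
# to the standard create call.
client = instructor.patch(OpenAI())

user = client.chat.completions.create(
    model="gpt-3.5-turbo",
    response_model=UserDetail,
    max_retries=2,
    messages=[{"role": "user", "content": "Extract: Jason is 25 years old"}],
)
assert isinstance(user, UserDetail)
```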
process_response(response, *, response_model, stream, validation_context=None, strict=None, mode=Mode.TOOLS)

Processes an OpenAI response with the response model, if available.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
response | T | The response from OpenAI's API | required |
response_model | Type[T_Model] | The response model to use for parsing the response | required |
stream | bool | Whether the response is a stream | required |
validation_context | dict | The validation context to use for validating the response. Defaults to None. | None |
strict | bool | Whether to use strict json parsing. Defaults to None. | None |
mode | Mode | The openai completion mode. Defaults to Mode.TOOLS. | TOOLS |
Returns:
Type | Description |
---|---|
Union[T_Model, T] | The parsed response, if a response model is available, otherwise the response as is from the SDK |
instructor/patch.py
process_response_async(response, *, response_model, stream=False, validation_context=None, strict=None, mode=Mode.TOOLS)

async

Processes an OpenAI response with the response model, if available. It can use validation_context and strict to validate the response via the pydantic model.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
response | ChatCompletion | The response from OpenAI's API | required |
response_model | BaseModel | The response model to use for parsing the response | required |
stream | bool | Whether the response is a stream | False |
validation_context | dict | The validation context to use for validating the response. Defaults to None. | None |
strict | bool | Whether to use strict json parsing. Defaults to None. | None |
instructor/patch.py
Validator

Bases: OpenAISchema

Validate if an attribute is correct and if not, return a new value with an error message.
instructor/dsl/validators.py
llm_validator(statement, allow_override=False, model='gpt-3.5-turbo', temperature=0, openai_client=None)

Create a validator that uses the LLM to validate an attribute.
```python
from typing import Annotated

from instructor import llm_validator
from pydantic import AfterValidator, BaseModel, Field, ValidationError

class User(BaseModel):
    name: Annotated[str, AfterValidator(llm_validator("The name must be a full name all lowercase"))]
    age: int = Field(description="The age of the person")

try:
    user = User(name="Jason Liu", age=20)
except ValidationError as e:
    print(e)
```

```
1 validation error for User
name
  The name is valid but not all lowercase (type=value_error.llm_validator)
```
Note that here the error message is written by the LLM, and the error type is value_error.llm_validator.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
statement | str | The statement to validate | required |
allow_override | bool | Whether the LLM may return a corrected value instead of raising | False |
model | str | The LLM to use for validation (default: "gpt-3.5-turbo") | 'gpt-3.5-turbo' |
temperature | float | The temperature to use for the LLM (default: 0) | 0 |
openai_client | OpenAI | The OpenAI client to use (default: None) | None |
instructor/dsl/validators.py
openai_moderation(client=None)

Validates a message using the OpenAI moderation model.

Should only be used for monitoring inputs and outputs of OpenAI APIs. Other use cases are disallowed as per: https://platform.openai.com/docs/guides/moderation/overview
Example:
```python
from typing import Annotated

from instructor import openai_moderation
from openai import OpenAI
from pydantic import AfterValidator, BaseModel

client = OpenAI()

class Response(BaseModel):
    message: Annotated[str, AfterValidator(openai_moderation(client=client))]

Response(message="I hate you")
```

```
ValidationError: 1 validation error for Response
message
  Value error, `I hate you.` was flagged for ['harassment'] [type=value_error, input_value='I hate you.', input_type=str]
```
Parameters:
Name | Type | Description | Default |
---|---|---|---|
client | OpenAI | The OpenAI client to use, must be sync | None |
instructor/dsl/validators.py
IterableModel(subtask_class, name=None, description=None)

Dynamically create an IterableModel OpenAISchema that can be used to segment multiple tasks given a base class. This creates a class that can be used to create a toolkit for a specific task; names and descriptions are automatically generated, but they can be overridden.
```python
from pydantic import BaseModel, Field
from instructor import IterableModel

class User(BaseModel):
    name: str = Field(description="The name of the person")
    age: int = Field(description="The age of the person")
    role: str = Field(description="The role of the person")

MultiUser = IterableModel(User)
```

The generated class is roughly equivalent to:

```python
class MultiUser(OpenAISchema, MultiTaskBase):
    tasks: List[User] = Field(
        default_factory=list,
        repr=False,
        description="Correctly segmented list of `User` tasks",
    )

    @classmethod
    def from_streaming_response(cls, completion) -> Generator[User, None, None]:
        """
        Parse the streaming response from OpenAI and yield a `User` object
        for each task in the response.
        """
        json_chunks = cls.extract_json(completion)
        yield from cls.tasks_from_chunks(json_chunks)
```
Parameters:
Name | Type | Description | Default |
---|---|---|---|
subtask_class | Type[OpenAISchema] | The base class to use for the MultiTask | required |
name | Optional[str] | The name of the MultiTask class; if None, a name is derived from the subtask class (e.g. MultiUser for User) | None |
description | Optional[str] | The description of the MultiTask class; if None, a default segmentation description is generated from the subtask class | None |
Returns:
Name | Type | Description |
---|---|---|
schema | OpenAISchema | A new class that can be used to segment multiple tasks |
instructor/dsl/iterable.py
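A sketch of using the generated class with a patched client and streaming; the model name and prompt are illustrative:

```python
import instructor
from openai import OpenAI

client = instructor.patch(OpenAI())

users = client.chat.completions.create(
    model="gpt-3.5-turbo",
    stream=True,
    response_model=MultiUser,  # the class generated above
    messages=[
        {
            "role": "user",
            "content": "Jason is a 25 year old engineer and Sarah is a 30 year old doctor",
        }
    ],
)

# Each User is yielded as soon as its JSON chunk is complete,
# via from_streaming_response shown above.
for user in users:
    print(user)
```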
Partial

Bases: Generic[Model]

Generate a new class with all attributes optional.

This will wrap a class inheriting from BaseModel and will recursively convert all its attributes and its children's attributes to optional.

Usage: Partial[SomeModel]
instructor/dsl/partial.py
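A sketch of streaming partially-filled objects with a patched client; the model name and prompt are illustrative:

```python
import instructor
from instructor.dsl.partial import Partial
from openai import OpenAI
from pydantic import BaseModel

class User(BaseModel):
    name: str
    age: int

client = instructor.patch(OpenAI())

# Each iteration yields a User whose fields fill in as tokens stream;
# attributes not yet received are None because Partial made them optional.
for partial_user in client.chat.completions.create(
    model="gpt-3.5-turbo",
    stream=True,
    response_model=Partial[User],
    messages=[{"role": "user", "content": "Jason is 25 years old"}],
):
    print(partial_user)
```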
__class_getitem__(wrapped_class)

Convert model to a partial model with all fields being optional.
instructor/dsl/partial.py
__init_subclass__(*args, **kwargs)

Cannot subclass.
Raises:
Type | Description |
---|---|
TypeError | Subclassing not allowed. |
instructor/dsl/partial.py
__new__(*args, **kwargs)

Cannot instantiate.
Raises:
Type | Description |
---|---|
TypeError | Direct instantiation not allowed. |
instructor/dsl/partial.py
MaybeBase

Bases: BaseModel, Generic[T]

Extract a result from a model, if any, otherwise set the error and message fields.
instructor/dsl/maybe.py
Maybe(model)

Create a Maybe model for a given Pydantic model. This allows you to return a model that includes fields for result, error, and message for situations where the data may not be present in the context.
```python
from pydantic import BaseModel, Field
from instructor import Maybe

class User(BaseModel):
    name: str = Field(description="The name of the person")
    age: int = Field(description="The age of the person")
    role: str = Field(description="The role of the person")

MaybeUser = Maybe(User)
```

The generated class is roughly equivalent to:

```python
class MaybeUser(BaseModel):
    result: Optional[User]
    error: bool = Field(default=False)
    message: Optional[str]

    def __bool__(self):
        return self.result is not None
```
Parameters:
Name | Type | Description | Default |
---|---|---|---|
model | Type[BaseModel] | The Pydantic model to wrap with Maybe. | required |
Returns:
Name | Type | Description |
---|---|---|
MaybeModel | Type[BaseModel] | A new Pydantic model that includes fields for result, error, and message |
instructor/dsl/maybe.py
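A usage sketch continuing the example above; the model name and prompt are illustrative:

```python
import instructor
from openai import OpenAI

client = instructor.patch(OpenAI())

extraction = client.chat.completions.create(
    model="gpt-3.5-turbo",
    response_model=MaybeUser,  # the class generated above
    messages=[{"role": "user", "content": "The weather is nice today."}],
)

# __bool__ makes the wrapper falsy when nothing was extracted.
if not extraction:
    print(extraction.message)
```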
Mode

Bases: Enum

The mode to use for patching the client.
instructor/function_calls.py
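The mode is passed to patch (or apatch), as in this sketch; only Mode.TOOLS and Mode.FUNCTIONS are referenced elsewhere on this page, so other member names are not assumed here:

```python
import instructor
from instructor import Mode
from openai import OpenAI

# Mode selects how structured output is requested from the API,
# here via the tools interface.
client = instructor.patch(OpenAI(), mode=Mode.TOOLS)
```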
OpenAISchema

Bases: BaseModel
instructor/function_calls.py
openai_schema: Dict[str, Any]

classmethod
property

Return the schema in the format of OpenAI's function schema as jsonschema.

It's important to add a docstring to describe how to best use this class; it will be included in the description attribute and be part of the prompt.
Returns:
Name | Type | Description |
---|---|---|
model_json_schema | dict | A dictionary in the format of OpenAI's schema as jsonschema |
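A sketch of inspecting the generated schema; the User class is illustrative:

```python
from instructor import OpenAISchema
from pydantic import Field

class User(OpenAISchema):
    """Correctly extracted user details."""

    name: str = Field(description="The name of the person")
    age: int

# The docstring becomes the schema's description and is part of the prompt.
# Roughly: {"name": "User", "description": "Correctly extracted user details.",
#           "parameters": {...}}
print(User.openai_schema)
```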
from_response(completion, validation_context=None, strict=None, mode=Mode.TOOLS)

classmethod

Execute the function from the response of an openai chat completion.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
completion | ChatCompletion | The response from an openai chat completion | required |
validation_context | dict | The validation context to use for validating the response | None |
strict | bool | Whether to use strict json parsing | None |
mode | Mode | The openai completion mode | TOOLS |
Returns:
Name | Type | Description |
---|---|---|
cls | OpenAISchema | An instance of the class |
instructor/function_calls.py
{"use strict";/*!
+ * escape-html
+ * Copyright(c) 2012-2013 TJ Holowaychuk
+ * Copyright(c) 2015 Andreas Lubbe
+ * Copyright(c) 2015 Tiancheng "Timothy" Gu
+ * MIT Licensed
+ */var Va=/["'&<>]/;qn.exports=za;function za(e){var t=""+e,r=Va.exec(t);if(!r)return t;var o,n="",i=0,s=0;for(i=r.index;i