feat: Integrate cohere models #932

Open · wants to merge 18 commits into master
3 changes: 3 additions & 0 deletions camel/configs/__init__.py
@@ -13,6 +13,7 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
from .base_config import BaseConfig
from .cohere_config import COHERE_API_PARAMS, CohereConfig
from .gemini_config import Gemini_API_PARAMS, GeminiConfig
from .groq_config import GROQ_API_PARAMS, GroqConfig
from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
@@ -58,4 +59,6 @@
'SAMBA_CLOUD_API_PARAMS',
'TogetherAIConfig',
'TOGETHERAI_API_PARAMS',
'CohereConfig',
'COHERE_API_PARAMS',
]
76 changes: 76 additions & 0 deletions camel/configs/cohere_config.py
@@ -0,0 +1,76 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from __future__ import annotations

from typing import List, Optional

from camel.configs.base_config import BaseConfig


class CohereConfig(BaseConfig):
r"""Defines the parameters for generating chat completions using the
Cohere API.

Args:
temperature (float, optional): Sampling temperature to use, between
:obj:`0` and :obj:`2`. Higher values make the output more random,
while lower values make it more focused and deterministic.
(default: :obj:`0.2`)
documents (list, optional): A list of relevant documents that the
model can cite to generate a more accurate reply. Each document is
either a string or document object with content and metadata.
(default: :obj:`None`)
max_tokens (int, optional): The maximum number of tokens the model
will generate as part of the response. (default: :obj:`None`)
stop_sequences (List[str], optional): A list of up to 5 strings that
the model will use to stop generation. If the model generates a
string that matches any of the strings in the list, it will stop
generating tokens and return the generated text up to that point
not including the stop sequence. (default: :obj:`None`)
seed (int, optional): If specified, the backend will make a best
effort to sample tokens deterministically, such that repeated
requests with the same seed and parameters should return the same
result. However, determinism cannot be totally guaranteed.
(default: :obj:`None`)
frequency_penalty (float, optional): Min value of `0.0`, max value of
`1.0`. Used to reduce repetitiveness of generated tokens. The
higher the value, the stronger a penalty is applied to previously
present tokens, proportional to how many times they have already
appeared in the prompt or prior generation. (default: :obj:`0.0`)
presence_penalty (float, optional): Min value of `0.0`, max value of
`1.0`. Used to reduce repetitiveness of generated tokens. Similar
to `frequency_penalty`, except that this penalty is applied
equally to all tokens that have already appeared, regardless of
their exact frequencies. (default: :obj:`0.0`)
k (int, optional): Ensures only the top k most likely tokens are
considered for generation at each step. Min value of `0`, max
value of `500`. (default: :obj:`0`)
p (float, optional): Ensures that only the most likely tokens, with
total probability mass of `p`, are considered for generation at
each step. If both k and p are enabled, `p` acts after `k`. Min
value of `0.01`, max value of `0.99`. (default: :obj:`0.75`)
"""

temperature: Optional[float] = 0.2
documents: Optional[list] = None
max_tokens: Optional[int] = None
stop_sequences: Optional[List[str]] = None
seed: Optional[int] = None
frequency_penalty: Optional[float] = 0.0
presence_penalty: Optional[float] = 0.0
k: Optional[int] = 0
p: Optional[float] = 0.75


COHERE_API_PARAMS = {param for param in CohereConfig().model_fields.keys()}
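For reference, a minimal usage sketch (not part of the diff): every field of the config is, by construction, a member of COHERE_API_PARAMS, which is what CohereModel.check_model_config later validates against. Assumes only the pydantic v2 model_dump API.

from camel.configs import COHERE_API_PARAMS, CohereConfig

# Unset fields keep their defaults; model_dump() yields a plain dict
# suitable for a backend's model_config_dict.
config = CohereConfig(temperature=0.2, max_tokens=256)
assert set(config.model_dump()) <= COHERE_API_PARAMS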
2 changes: 2 additions & 0 deletions camel/models/__init__.py
@@ -14,6 +14,7 @@
from .anthropic_model import AnthropicModel
from .azure_openai_model import AzureOpenAIModel
from .base_model import BaseModelBackend
from .cohere_model import CohereModel
from .gemini_model import GeminiModel
from .groq_model import GroqModel
from .litellm_model import LiteLLMModel
@@ -40,6 +41,7 @@
'GroqModel',
'StubModel',
'ZhipuAIModel',
'CohereModel',
'ModelFactory',
'LiteLLMModel',
'OpenAIAudioModels',
261 changes: 261 additions & 0 deletions camel/models/cohere_model.py
@@ -0,0 +1,261 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, List, Optional

if TYPE_CHECKING:
from cohere.types import ChatMessageV2, ChatResponse

from camel.configs import COHERE_API_PARAMS
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.types import ChatCompletion, ModelType
from camel.utils import (
BaseTokenCounter,
OpenAITokenCounter,
api_keys_required,
)

try:
if os.getenv("AGENTOPS_API_KEY") is not None:
from agentops import LLMEvent, record
else:
raise ImportError
except (ImportError, AttributeError):
LLMEvent = None


class CohereModel(BaseModelBackend):
r"""Cohere API in a unified BaseModelBackend interface."""

def __init__(
self,
model_type: ModelType,
model_config_dict: Dict[str, Any],
api_key: Optional[str] = None,
url: Optional[str] = None,
token_counter: Optional[BaseTokenCounter] = None,
):
import cohere

api_key = api_key or os.environ.get("COHERE_API_KEY")
url = url or os.environ.get("COHERE_SERVER_URL")
super().__init__(
model_type, model_config_dict, api_key, url, token_counter
)
self._client = cohere.ClientV2(api_key=self._api_key)

def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion:
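r"""Converts a Cohere `ChatResponse` into an OpenAI-style
`ChatCompletion`, carrying over token usage and any tool calls.
"""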
usage = {
"prompt_tokens": response.usage.tokens.input_tokens or 0, # type: ignore[union-attr]
"completion_tokens": response.usage.tokens.output_tokens or 0, # type: ignore[union-attr]
"total_tokens": (response.usage.tokens.input_tokens or 0) # type: ignore[union-attr]
+ (response.usage.tokens.output_tokens or 0), # type: ignore[union-attr]
}

tool_calls = response.message.tool_calls # type: ignore[union-attr]
choices = []
if tool_calls:
# Collect every tool call into a single assistant message rather
# than emitting one choice per call.
openai_tool_calls = [
dict(
id=tool_call.id, # type: ignore[union-attr]
function={
"name": tool_call.function.name, # type: ignore[union-attr]
"arguments": tool_call.function.arguments, # type: ignore[union-attr]
},
type=tool_call.type,
)
for tool_call in tool_calls
]
choice = dict(
index=None,
message={
"role": "assistant",
"content": response.message.tool_plan, # type: ignore[union-attr]
"tool_calls": openai_tool_calls,
},
finish_reason=response.finish_reason
if response.finish_reason
else None,
)
else:
choice = dict(
index=None,
message={
"role": "assistant",
"content": response.message.content[0].text, # type: ignore[union-attr,index]
"tool_calls": None,
},
finish_reason=response.finish_reason
if response.finish_reason
else None,
)
choices.append(choice)

obj = ChatCompletion.construct(
id=response.id,
choices=choices,
created=None,
model=self.model_type,
object="chat.completion",
usage=usage,
)
return obj

def _to_cohere_chatmessage(
self, messages: List[OpenAIMessage]
) -> List["ChatMessageV2"]:
from cohere.types import ToolCallV2Function
from cohere.types.chat_message_v2 import (
AssistantChatMessageV2,
SystemChatMessageV2,
ToolCallV2,
ToolChatMessageV2,
UserChatMessageV2,
)

new_messages = []
for msg in messages:
role = msg.get("role")
content = msg.get("content")
function_call = msg.get("function_call")

if role == "user":
new_message = UserChatMessageV2(role="user", content=content) # type: ignore[arg-type]
elif role == "function":
new_message = ToolChatMessageV2(
role="tool",
tool_call_id="0",
content=content, # type: ignore[assignment,arg-type]
)
elif role == "assistant":
new_message = AssistantChatMessageV2( # type: ignore[assignment]
role="assistant",
tool_calls=[
ToolCallV2(
id="0",
type="function",
function=ToolCallV2Function(
name=function_call.get("name"), # type: ignore[attr-defined]
arguments=function_call.get("arguments"), # type: ignore[attr-defined]
),
)
]
if function_call
else None,
content=content, # type: ignore[arg-type]
)
elif role == "system":
new_message = SystemChatMessageV2( # type: ignore[assignment]
role="system",
content=content, # type: ignore[arg-type]
)
else:
raise ValueError(f"Unsupported message role: {role}")

new_messages.append(new_message)

return new_messages # type: ignore[return-value]

@property
def token_counter(self) -> BaseTokenCounter:
r"""Initialize the token counter for the model backend.

Returns:
BaseTokenCounter: The token counter following the model's
tokenization style.
"""
if not self._token_counter:
self._token_counter = OpenAITokenCounter(
model=ModelType.GPT_4O_MINI
)
return self._token_counter

@api_keys_required("COHERE_API_KEY")
def run(self, messages: List[OpenAIMessage]) -> ChatCompletion:
r"""Runs inference of Cohere chat completion.

Args:
messages (List[OpenAIMessage]): Message list with the chat history
in OpenAI API format.

Returns:
ChatCompletion: The chat completion in OpenAI API format.
"""
from cohere.core.api_error import ApiError

cohere_messages = self._to_cohere_chatmessage(messages)

try:
response = self._client.chat(
messages=cohere_messages,
model=self.model_type,
**self.model_config_dict,
)
except ApiError as e:
logging.error(f"Cohere API Error: {e.status_code}")
logging.error(f"Error body: {e.body}")
raise
except Exception as e:
logging.error(f"Unexpected error when calling Cohere API: {e!s}")
raise

openai_response = self._to_openai_response(response)

# Add AgentOps LLM Event tracking
if LLMEvent:
llm_event = LLMEvent(
thread_id=openai_response.id,
prompt=" ".join(
[message.get("content") for message in messages] # type: ignore[misc]
),
prompt_tokens=openai_response.usage.prompt_tokens, # type: ignore[union-attr]
completion=openai_response.choices[0].message.content,
completion_tokens=openai_response.usage.completion_tokens, # type: ignore[union-attr]
model=self.model_type,
)
record(llm_event)

return openai_response

def check_model_config(self):
r"""Check whether the model configuration contains any
unexpected arguments to Cohere API.
Raises:
ValueError: If the model configuration dictionary contains any
unexpected arguments to Cohere API.
"""
for param in self.model_config_dict:
if param not in COHERE_API_PARAMS:
raise ValueError(
f"Unexpected argument `{param}` is "
"input into Cohere model backend."
)

@property
def stream(self) -> bool:
r"""Returns whether the model is in stream mode, which sends partial
results each time. Current it's not supported.

Returns:
bool: Whether the model is in stream mode.
"""
return False
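A hedged usage sketch of the backend above (not part of the diff). The Cohere member of ModelType is assumed from other files in this PR; COHERE_COMMAND_R here is illustrative, not confirmed by this hunk.

import os

from camel.configs import CohereConfig
from camel.models import CohereModel
from camel.types import ModelType

# Assumes COHERE_API_KEY is set in the environment.
assert os.environ.get("COHERE_API_KEY")

model = CohereModel(
    model_type=ModelType.COHERE_COMMAND_R,  # assumed enum member
    model_config_dict=CohereConfig(temperature=0.2).model_dump(),
)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello in one sentence."},
]
response = model.run(messages)  # OpenAI-style ChatCompletion
print(response.choices[0].message.content)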
3 changes: 3 additions & 0 deletions camel/models/model_factory.py
@@ -16,6 +16,7 @@
from camel.models.anthropic_model import AnthropicModel
from camel.models.azure_openai_model import AzureOpenAIModel
from camel.models.base_model import BaseModelBackend
from camel.models.cohere_model import CohereModel
from camel.models.gemini_model import GeminiModel
from camel.models.groq_model import GroqModel
from camel.models.litellm_model import LiteLLMModel
@@ -106,6 +107,8 @@ def create(
model_class = MistralModel
elif model_platform.is_reka and model_type.is_reka:
model_class = RekaModel
elif model_platform.is_cohere and model_type.is_cohere:
model_class = CohereModel
elif model_type == ModelType.STUB:
model_class = StubModel

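A hedged sketch of the factory path this hunk enables (not part of the diff; the ModelPlatformType.COHERE and Cohere ModelType members are assumed from other files in this PR):

from camel.configs import CohereConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# With the branch above, both platform and model type being Cohere
# dispatches to CohereModel.
model = ModelFactory.create(
    model_platform=ModelPlatformType.COHERE,  # assumed enum member
    model_type=ModelType.COHERE_COMMAND_R,    # assumed enum member
    model_config_dict=CohereConfig().model_dump(),
)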