Skip to content

Commit

Permalink
Merge pull request #194 from juke/Groq-API-Integration
Browse files Browse the repository at this point in the history
Groq API integration
  • Loading branch information
lalalune authored Nov 4, 2024
2 parents 2e440a0 + 96236b2 commit aca0cdd
Show file tree
Hide file tree
Showing 4 changed files with 39 additions and 0 deletions.
2 changes: 2 additions & 0 deletions core/.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
DISCORD_APPLICATION_ID=
DISCORD_API_TOKEN= # Bot token
OPENAI_API_KEY=sk-* # OpenAI API key, starting with sk-
GROQ_API_KEY=gsk_*

ELEVENLABS_XI_API_KEY= # API key from elevenlabs

# ELEVENLABS SETTINGS
Expand Down
18 changes: 18 additions & 0 deletions core/src/core/generation.ts
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,24 @@ export async function generateText({
break;
}

case ModelProvider.GROQ: {
console.log("Initializing Groq model.");
const groq = createGroq({ apiKey });

const { text: groqResponse } = await aiGenerateText({
model: groq.languageModel(model),
prompt: context,
temperature: temperature,
maxTokens: max_response_length,
frequencyPenalty: frequency_penalty,
presencePenalty: presence_penalty,
});

response = groqResponse;
console.log("Received response from Groq model.");
break;
}

case ModelProvider.LLAMALOCAL:
prettyConsole.log(
"Using local Llama model for text completion."
Expand Down
18 changes: 18 additions & 0 deletions core/src/core/models.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ type Models = {
[ModelProvider.OPENAI]: Model;
[ModelProvider.ANTHROPIC]: Model;
[ModelProvider.GROK]: Model;
[ModelProvider.GROQ]: Model;
[ModelProvider.LLAMACLOUD]: Model;
[ModelProvider.LLAMALOCAL]: Model;
[ModelProvider.GOOGLE]: Model;
Expand Down Expand Up @@ -78,6 +79,23 @@ const models: Models = {
[ModelClass.EMBEDDING]: "grok-2-beta", // not sure about this one
},
},
[ModelProvider.GROQ]: {
endpoint: "https://api.groq.com/openai/v1",
settings: {
stop: [],
maxInputTokens: 128000,
maxOutputTokens: 8000,
frequency_penalty: 0.0,
presence_penalty: 0.0,
temperature: 0.3,
},
model: {
[ModelClass.SMALL]: "llama-3.1-8b-instant",
[ModelClass.MEDIUM]: "llama-3.1-70b-versatile",
[ModelClass.LARGE]: "llama-3.2-90b-text-preview",
[ModelClass.EMBEDDING]: "llama-3.1-8b-instant",
},
},
[ModelProvider.LLAMACLOUD]: {
settings: {
stop: [],
Expand Down
1 change: 1 addition & 0 deletions core/src/core/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ export enum ModelProvider {
OPENAI = "openai",
ANTHROPIC = "anthropic",
GROK = "grok",
GROQ = "groq",
LLAMACLOUD = "llama_cloud",
LLAMALOCAL = "llama_local",
GOOGLE = "google",
Expand Down

0 comments on commit aca0cdd

Please sign in to comment.