diff --git a/samples/sample_benchmark.ts b/samples/sample_benchmark.ts
index f211dcb16..956bdfb40 100644
--- a/samples/sample_benchmark.ts
+++ b/samples/sample_benchmark.ts
@@ -3,7 +3,7 @@ import { graphDataTestRunner } from "~/utils/runner";
 import { groqAgent, fetchAgent, shiftAgent, nestedAgent } from "@/experimental_agents";
 
 const graph_data = {
-  version: 0.2,
+  version: 0.3,
   nodes: {
     GSM8: {
       // This node specifies the URL and query parameters to fetch the GSM8K dataset.
@@ -21,49 +21,49 @@ const graph_data = {
     fetch: {
       // This node fetches the dataset over HTTP.
       agent: "fetchAgent",
-      inputs: ["GSM8.url", "GSM8.query"],
+      inputs: [":GSM8.url", ":GSM8.query"],
     },
     rows: {
       // This node extracts the "row" property from each item in the dataset.
       agent: (items: Array<Record<string, any>>) => items.map((item) => item.row),
-      inputs: ["fetch.rows"],
+      inputs: [":fetch.rows"],
     },
     debugOutputRow: {
       agent: (row: Record<string, any>) => console.log(row),
-      inputs: ["rows.$0"],
+      inputs: [":rows.$0"],
     },
     loop: {
       // This node iterates over all the items in the dataset using the nested graph.
       agent: "nestedAgent",
-      inputs: ["rows"],
+      inputs: [":rows"],
       isResult: true,
       graph: {
         // This graph continues until the array on node "$0" becomes empty.
-        version: 0.2,
+        version: 0.3,
         loop: {
-          while: "$0",
+          while: ":$0",
         },
         nodes: {
          // This node receives inputs[0] (data from the "rows" node on the outer graph) initially,
          // then is updated with the data from the "retriever" node after each iteration.
          $0: {
            value: undefined,
-            update: "retriever.array",
+            update: ":retriever.array",
          },
          // This node accumulates answers for each question in the dataset.
          answers: {
            value: [],
-            update: "reducer",
+            update: ":reducer",
            isResult: true,
          },
          // This node takes the first item from the array from node "$0".
          retriever: {
            agent: "shiftAgent",
-            inputs: ["$0"],
+            inputs: [":$0"],
          },
          debugOutputQA: {
            agent: (item: Record<string, any>) => console.log(`Q: ${item.question}\nA0: ${item.answer}`),
-            inputs: ["retriever.item"],
+            inputs: [":retriever.item"],
          },
          groq: {
            // This node sends the question on the current item to Llama3 on groq and gets the answer.
@@ -71,16 +71,16 @@ const graph_data = {
            params: {
              model: "Llama3-8b-8192",
            },
-            inputs: ["retriever.item.question"],
+            inputs: [":retriever.item.question"],
          },
          reducer: {
            // This node pushes the answer from Llama3 into the answers array.
            agent: "pushAgent",
-            inputs: ["answers", "groq.choices.$0.message.content"],
+            inputs: [":answers", ":groq.choices.$0.message.content"],
          },
          debugOutputA: {
            agent: (answer: string) => console.log(`A: ${answer}\n`),
-            inputs: ["groq.choices.$0.message.content"],
+            inputs: [":groq.choices.$0.message.content"],
          },
        },
      },
diff --git a/samples/sample_benchmark2.ts b/samples/sample_benchmark2.ts
index a1c019ac8..c5668bb35 100644
--- a/samples/sample_benchmark2.ts
+++ b/samples/sample_benchmark2.ts
@@ -3,7 +3,7 @@ import { graphDataTestRunner } from "~/utils/runner";
 import { groqAgent, fetchAgent, mapAgent } from "@/experimental_agents";
 
 const graph_data = {
-  version: 0.2,
+  version: 0.3,
   nodes: {
     GSM8: {
       // This node specifies the URL and query parameters to fetch the GSM8K dataset.
@@ -21,20 +21,20 @@ const graph_data = {
     fetch: {
       // This node fetches the dataset over HTTP.
       agent: "fetchAgent",
-      inputs: ["GSM8.url", "GSM8.query"],
+      inputs: [":GSM8.url", ":GSM8.query"],
     },
     rows: {
       // This node extracts the "row" property from each item in the dataset.
       agent: (items: Array<Record<string, any>>) => items.map((item) => item.row),
-      inputs: ["fetch.rows"],
+      inputs: [":fetch.rows"],
     },
     map: {
       // This node executes the nested graph concurrently.
       agent: "mapAgent",
-      inputs: ["rows"],
+      inputs: [":rows"],
       isResult: true,
       graph: {
-        version: 0.2,
+        version: 0.3,
        nodes: {
          groq: {
            // This node sends the question on the current item to Llama3 on groq and gets the answer.
@@ -42,11 +42,11 @@ const graph_data = {
            params: {
              model: "Llama3-8b-8192",
            },
-            inputs: ["$0.question"],
+            inputs: [":$0.question"],
          },
          answer: {
            agent: (item: string) => item,
-            inputs: ["groq.choices.$0.message.content"],
+            inputs: [":groq.choices.$0.message.content"],
            isResult: true,
          },
        },
diff --git a/samples/sample_chat.ts b/samples/sample_chat.ts
index 18418a63d..047bbe806 100644
--- a/samples/sample_chat.ts
+++ b/samples/sample_chat.ts
@@ -4,19 +4,19 @@ import { groqAgent, shiftAgent, nestedAgent } from "@/experimental_agents";
 import input from "@inquirer/input";
 
 const graph_data = {
-  version: 0.2,
+  version: 0.3,
   loop: {
-    while: "continue",
+    while: ":continue",
   },
   nodes: {
     continue: {
       value: true,
-      update: "checkInput",
+      update: ":checkInput",
     },
     messages: {
       // This node holds the conversation, an array of messages.
       value: [],
-      update: "reducer",
+      update: ":reducer",
       isResult: true,
     },
     userInput: {
@@ -25,13 +25,13 @@ const graph_data = {
     },
     checkInput: {
       agent: (query: string) => query !== "/bye",
-      inputs: ["userInput"],
+      inputs: [":userInput"],
     },
     appendedMessages: {
       // This node appends the user's input to the array of messages.
       agent: (content: string, messages: Array<any>) => [...messages, { role: "user", content }],
-      inputs: ["userInput", "messages"],
-      if: "checkInput",
+      inputs: [":userInput", ":messages"],
+      if: ":checkInput",
     },
     groq: {
       // This node sends those messages to Llama3 on groq to get the answer.
@@ -39,17 +39,17 @@ const graph_data = {
       params: {
         model: "Llama3-8b-8192",
       },
-      inputs: [undefined, "appendedMessages"],
+      inputs: [undefined, ":appendedMessages"],
     },
     output: {
       // This node displays the response to the user.
       agent: (answer: string) => console.log(`Llama3: ${answer}\n`),
-      inputs: ["groq.choices.$0.message.content"],
+      inputs: [":groq.choices.$0.message.content"],
     },
     reducer: {
       // This node appends the response to the messages.
       agent: "pushAgent",
-      inputs: ["appendedMessages", "groq.choices.$0.message"],
+      inputs: [":appendedMessages", ":groq.choices.$0.message"],
     },
   },
 };
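Aside: sample_chat.ts above (and sample_weather.ts below) are loop graphs, and the same ":" rule now applies to the loop controls as well. A minimal sketch of that pattern under version 0.3, written as a self-contained object literal; the node names are illustrative, borrowed from the samples:

const loop_sketch = {
  version: 0.3,
  loop: {
    while: ":continue", // keep iterating while the "continue" node yields a truthy value
  },
  nodes: {
    continue: {
      value: true, // initial value used on the first iteration
      update: ":checkInput", // re-seeded from checkInput's result after each iteration
    },
  },
};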
agent: "pushAgent", - inputs: ["appendedMessages", "groq.choices.$0.message"], + inputs: [":appendedMessages", ":groq.choices.$0.message"], }, }, }; diff --git a/samples/sample_tool.ts b/samples/sample_tool.ts index 0af6d0a05..1b0617cf4 100644 --- a/samples/sample_tool.ts +++ b/samples/sample_tool.ts @@ -24,7 +24,7 @@ const tools = [ ]; const graph_data = { - version: 0.2, + version: 0.3, concurrency: 1, nodes: { foods: { @@ -32,14 +32,14 @@ const graph_data = { }, categorizer: { agent: "mapAgent", - inputs: ["foods"], + inputs: [":foods"], isResult: true, graph: { - version: 0.2, + version: 0.3, nodes: { debug: { agent: (food: string) => console.log(food), - inputs: ["$0"], + inputs: [":$0"], isResult: true, }, groq: { @@ -52,14 +52,14 @@ const graph_data = { tool_choice: { type: "function", function: { name: "categorize" } }, }, retry: 1, - inputs: ["$0"], + inputs: [":$0"], }, parser: { agent: (food: string, args: string) => { const json = JSON.parse(args); return { [food]: json.category }; }, - inputs: ["$0", "groq.choices.$0.message.tool_calls.$0.function.arguments"], + inputs: [":$0", ":groq.choices.$0.message.tool_calls.$0.function.arguments"], isResult: true, }, }, diff --git a/samples/sample_weather.ts b/samples/sample_weather.ts index 9e31b7fbc..47581f7b3 100644 --- a/samples/sample_weather.ts +++ b/samples/sample_weather.ts @@ -28,19 +28,19 @@ const tools = [ ]; const graph_data = { - version: 0.2, + version: 0.3, loop: { - while: "continue", + while: ":continue", }, nodes: { continue: { value: true, - update: "checkInput", + update: ":checkInput", }, messages: { // This node holds the conversation, array of messages. value: [{ role: "system", content: "You are a meteorologist. Use getWeather API, only when the user ask for the weather information." }], - update: "reducer", + update: ":reducer", isResult: true, }, userInput: { @@ -50,13 +50,13 @@ const graph_data = { checkInput: { // Checkes if the user wants to end the conversation. agent: (query: string) => query !== "/bye", - inputs: ["userInput"], + inputs: [":userInput"], }, messagesWithUserInput: { // Appends the user's input to the messages. agent: (messages: Array, content: string) => [...messages, { role: "user", content }], - inputs: ["messages", "userInput"], - if: "checkInput", + inputs: [":messages", ":userInput"], + if: ":checkInput", }, groq: { // Sends those messages to LLM to get the answer. @@ -65,32 +65,32 @@ const graph_data = { model: "Llama3-8b-8192", tools, }, - inputs: [undefined, "messagesWithUserInput"], + inputs: [undefined, ":messagesWithUserInput"], }, output: { // Displays the response to the user. agent: (answer: string) => console.log(`Llama3: ${answer}\n`), - inputs: ["groq.choices.$0.message.content"], - if: "groq.choices.$0.message.content", + inputs: [":groq.choices.$0.message.content"], + if: ":groq.choices.$0.message.content", }, messagesWithFirstRes: { // Appends the response to the messages. agent: "pushAgent", - inputs: ["messagesWithUserInput", "groq.choices.$0.message"], + inputs: [":messagesWithUserInput", ":groq.choices.$0.message"], }, tool_calls: { // This node is activated if the LLM requests a tool call. agent: "nestedAgent", - inputs: ["groq.choices.$0.message.tool_calls", "messagesWithFirstRes"], - if: "groq.choices.$0.message.tool_calls", + inputs: [":groq.choices.$0.message.tool_calls", ":messagesWithFirstRes"], + if: ":groq.choices.$0.message.tool_calls", graph: { // This graph is nested only for the readability. 
diff --git a/samples/sample_weather.ts b/samples/sample_weather.ts
index 9e31b7fbc..47581f7b3 100644
--- a/samples/sample_weather.ts
+++ b/samples/sample_weather.ts
@@ -28,19 +28,19 @@ const tools = [
 ];
 
 const graph_data = {
-  version: 0.2,
+  version: 0.3,
   loop: {
-    while: "continue",
+    while: ":continue",
   },
   nodes: {
     continue: {
       value: true,
-      update: "checkInput",
+      update: ":checkInput",
     },
     messages: {
       // This node holds the conversation, an array of messages.
       value: [{ role: "system", content: "You are a meteorologist. Use getWeather API, only when the user asks for the weather information." }],
-      update: "reducer",
+      update: ":reducer",
       isResult: true,
     },
     userInput: {
@@ -50,13 +50,13 @@ const graph_data = {
     checkInput: {
       // Checks if the user wants to end the conversation.
       agent: (query: string) => query !== "/bye",
-      inputs: ["userInput"],
+      inputs: [":userInput"],
     },
     messagesWithUserInput: {
       // Appends the user's input to the messages.
       agent: (messages: Array<any>, content: string) => [...messages, { role: "user", content }],
-      inputs: ["messages", "userInput"],
-      if: "checkInput",
+      inputs: [":messages", ":userInput"],
+      if: ":checkInput",
     },
     groq: {
       // Sends those messages to the LLM to get the answer.
@@ -65,32 +65,32 @@ const graph_data = {
         model: "Llama3-8b-8192",
         tools,
       },
-      inputs: [undefined, "messagesWithUserInput"],
+      inputs: [undefined, ":messagesWithUserInput"],
     },
     output: {
       // Displays the response to the user.
       agent: (answer: string) => console.log(`Llama3: ${answer}\n`),
-      inputs: ["groq.choices.$0.message.content"],
-      if: "groq.choices.$0.message.content",
+      inputs: [":groq.choices.$0.message.content"],
+      if: ":groq.choices.$0.message.content",
     },
     messagesWithFirstRes: {
       // Appends the response to the messages.
       agent: "pushAgent",
-      inputs: ["messagesWithUserInput", "groq.choices.$0.message"],
+      inputs: [":messagesWithUserInput", ":groq.choices.$0.message"],
     },
     tool_calls: {
       // This node is activated if the LLM requests a tool call.
       agent: "nestedAgent",
-      inputs: ["groq.choices.$0.message.tool_calls", "messagesWithFirstRes"],
-      if: "groq.choices.$0.message.tool_calls",
+      inputs: [":groq.choices.$0.message.tool_calls", ":messagesWithFirstRes"],
+      if: ":groq.choices.$0.message.tool_calls",
       graph: {
         // This graph is nested only for readability.
-        version: 0.2,
+        version: 0.3,
        nodes: {
          outputFetching: {
            agent: (args: any) => console.log(`... fetching weather info ${args}`),
-            inputs: ["$0.$0.function.arguments"],
+            inputs: [":$0.$0.function.arguments"],
          },
          urlPoints: {
            // Builds a URL to fetch the "grid location" for the specified latitude and longitude.
@@ -98,7 +98,7 @@ const graph_data = {
              const { latitude, longitude } = JSON.parse(args);
              return `https://api.weather.gov/points/${latitude},${longitude}`;
            },
-            inputs: ["$0.$0.function.arguments"],
+            inputs: [":$0.$0.function.arguments"],
          },
          fetchPoints: {
            // Fetches the "grid location" from the URL.
@@ -106,7 +106,7 @@ const graph_data = {
            params: {
              returnErrorResult: true, // returns { status, error } in case of error
            },
-            inputs: ["urlPoints", undefined, { "User-Agent": "(receptron.org)" }],
+            inputs: [":urlPoints", undefined, { "User-Agent": "(receptron.org)" }],
          },
          fetchForecast: {
            // Fetches the weather forecast for that location.
@@ -114,13 +114,13 @@ const graph_data = {
            params: {
              type: "text",
            },
-            inputs: ["fetchPoints.properties.forecast", undefined, { "User-Agent": "(receptron.org)" }],
-            if: "fetchPoints.properties.forecast",
+            inputs: [":fetchPoints.properties.forecast", undefined, { "User-Agent": "(receptron.org)" }],
+            if: ":fetchPoints.properties.forecast",
          },
          responseText: {
            agent: "copyAgent",
            anyInput: true,
-            inputs: ["fetchForecast", "fetchPoints.error"],
+            inputs: [":fetchForecast", ":fetchPoints.error"],
          },
          toolMessage: {
            // Creates a tool message as the return value of the tool call.
@@ -130,17 +130,17 @@ const graph_data = {
              name: info.function.name,
              content: res,
            }),
-            inputs: ["$0.$0", "responseText"],
+            inputs: [":$0.$0", ":responseText"],
          },
          filteredMessages: {
            // Removes previous tool messages to make room.
            agent: (messages: any) => messages.filter((message: any) => message.role !== "tool"),
-            inputs: ["$1"],
+            inputs: [":$1"],
          },
          messagesWithToolRes: {
            // Appends that message to the messages.
            agent: "pushAgent",
-            inputs: ["filteredMessages", "toolMessage"],
+            inputs: [":filteredMessages", ":toolMessage"],
          },
          groq: {
            // Sends those messages to the LLM to get the answer.
@@ -148,17 +148,17 @@ const graph_data = {
            params: {
              model: "Llama3-8b-8192",
            },
-            inputs: [undefined, "messagesWithToolRes"],
+            inputs: [undefined, ":messagesWithToolRes"],
          },
          output: {
            // Displays the response to the user.
            agent: (answer: string) => console.log(`Llama3: ${answer}\n`),
-            inputs: ["groq.choices.$0.message.content"],
+            inputs: [":groq.choices.$0.message.content"],
          },
          messagesWithSecondRes: {
            // Appends the response to the messages.
            agent: "pushAgent",
-            inputs: ["messagesWithToolRes", "groq.choices.$0.message"],
+            inputs: [":messagesWithToolRes", ":groq.choices.$0.message"],
            isResult: true,
          },
        },
@@ -167,15 +167,15 @@ const graph_data = {
     no_tool_calls: {
       // This node is activated only if this is a normal response (not a tool call).
       agent: "copyAgent",
-      if: "groq.choices.$0.message.content",
-      inputs: ["messagesWithFirstRes"],
+      if: ":groq.choices.$0.message.content",
+      inputs: [":messagesWithFirstRes"],
     },
     reducer: {
       // Receives messages from either case.
       agent: "copyAgent",
       anyInput: true,
-      inputs: ["no_tool_calls", "tool_calls.messagesWithSecondRes"],
+      inputs: [":no_tool_calls", ":tool_calls.messagesWithSecondRes"],
     },
   },
 };
agent: "copyAgent", anyInput: true, - inputs: ["no_tool_calls", "tool_calls.messagesWithSecondRes"], + inputs: [":no_tool_calls", ":tool_calls.messagesWithSecondRes"], }, }, }; diff --git a/samples/sample_wikipedia.ts b/samples/sample_wikipedia.ts index 966d25b15..d95244123 100644 --- a/samples/sample_wikipedia.ts +++ b/samples/sample_wikipedia.ts @@ -5,13 +5,13 @@ import { interactiveInputTextAgent } from "./agents/interactiveInputAgent"; import { wikipediaAgent } from "@/experimental_agents"; const graph_data = { - version: 0.2, + version: 0.3, nodes: { interactiveInputAgent: { agent: "interactiveInputTextAgent", }, wikipedia: { - inputs: ["interactiveInputAgent.answer"], + inputs: [":interactiveInputAgent.answer"], agent: "wikipediaAgent", params: { lang: "ja", diff --git a/samples/sample_wikipedia2.ts b/samples/sample_wikipedia2.ts index 2052f7e8d..3bceadbb4 100644 --- a/samples/sample_wikipedia2.ts +++ b/samples/sample_wikipedia2.ts @@ -13,7 +13,7 @@ import { } from "@/experimental_agents"; const graph_data = { - version: 0.2, + version: 0.3, nodes: { source: { value: { @@ -25,7 +25,7 @@ const graph_data = { wikipedia: { // Fetch an article from Wikipedia agent: "wikipediaAgent", - inputs: ["source.name"], + inputs: [":source.name"], params: { lang: "en", }, @@ -33,32 +33,32 @@ const graph_data = { chunks: { // Break that article into chunks agent: "stringSplitterAgent", - inputs: ["wikipedia.content"], + inputs: [":wikipedia.content"], }, embeddings: { // Get embedding vectors of those chunks agent: "stringEmbeddingsAgent", - inputs: ["chunks.contents"], + inputs: [":chunks.contents"], }, topicEmbedding: { // Get embedding vector of the topic agent: "stringEmbeddingsAgent", - inputs: ["source.topic"], + inputs: [":source.topic"], }, similarityCheck: { // Get the cosine similarities of those vectors agent: "dotProductAgent", - inputs: ["embeddings", "topicEmbedding"], + inputs: [":embeddings", ":topicEmbedding"], }, sortedChunks: { // Sort chunks based on those similarities agent: "sortByValuesAgent", - inputs: ["chunks.contents", "similarityCheck"], + inputs: [":chunks.contents", ":similarityCheck"], }, referenceText: { // Generate reference text from those chunks (token limited) agent: "tokenBoundStringsAgent", - inputs: ["sortedChunks"], + inputs: [":sortedChunks"], params: { limit: 5000, }, @@ -66,7 +66,7 @@ const graph_data = { prompt: { // Generate a prompt with that reference text agent: "stringTemplateAgent", - inputs: ["source.query", "referenceText.content"], + inputs: [":source.query", ":referenceText.content"], params: { template: "Using the following document, ${0}\n\n${1}", }, @@ -74,12 +74,12 @@ const graph_data = { RagQuery: { // Get the answer from LLM with that prompt agent: "slashGPTAgent", - inputs: ["prompt"], + inputs: [":prompt"], }, OneShotQuery: { // Get the answer from LLM without the reference text agent: "slashGPTAgent", - inputs: ["source.query"], + inputs: [":source.query"], }, }, };