diff --git a/libs/langgraph/src/prebuilt/tool_node.ts b/libs/langgraph/src/prebuilt/tool_node.ts index 5b1f1480..9e9dc8d7 100644 --- a/libs/langgraph/src/prebuilt/tool_node.ts +++ b/libs/langgraph/src/prebuilt/tool_node.ts @@ -17,13 +17,41 @@ export type ToolNodeOptions = { }; /** - * A node that runs the tools requested in the last AIMessage. It can be used - * either in StateGraph with a "messages" key or in MessageGraph. If multiple - * tool calls are requested, they will be run in parallel. The output will be - * a list of ToolMessages, one for each tool call. + * The prebuilt ToolNode executes the provided functions when requested by an LLM as tool_calls. + * + * Key expectations: + * 1. Input: Expects either a state object with a messages key containing a list of BaseMessages, or a list of messages directly. + * The last message **must** be an AIMessage containing `tool_calls`. + * 2. Tool Execution: Processes all tool calls found in the last AIMessage, executing them in parallel. + * 3. Output: Returns either an array of `ToolMessage`s or an object with a messages key containing the `ToolMessage`s, depending on the input type. + * 4. Error Handling: Throws errors for invalid inputs (non-AIMessage) or if a requested tool is not found. + * + * Typical usage: + * - Construct the ToolNode with the same list of tools (functions) provided to the LLM for tool calling. + * - Ensure the AI model is aware of and can request the tools available to the ToolNode (e.g., by calling `llm.bindTools(tools)`). + * - Route to the tool node only if the last message contains tool calls. + * + * @typeParam T - Optional: the type of input, either an array of `BaseMessage` or `MessagesState`.
+ * + * @example + * ```typescript + * import { ToolNode, toolsCondition } from "@langchain/langgraph/prebuilt"; + * import { StateGraph, MessagesAnnotation } from "@langchain/langgraph"; + * import { TavilySearchResults } from "@langchain/community/tools/tavily_search"; + * + * const tools = [new TavilySearchResults({ maxResults: 1 })]; + * const toolNode = new ToolNode(tools); + * + * const workflow = new StateGraph(MessagesAnnotation) + * .addNode("agent", callModel) // contains an LLM call that will emit an AIMessage with tool_calls + * .addNode("tools", toolNode) + * .addConditionalEdges("agent", toolsCondition) + * .addEdge("tools", "agent"); // After tools are executed, return to the agent to summarize results. + * ``` */ // eslint-disable-next-line @typescript-eslint/no-explicit-any export class ToolNode extends RunnableCallable { + /** The array of tools available for execution. */ tools: (StructuredToolInterface | RunnableToolLike)[]; handleToolErrors = true; @@ -87,6 +115,23 @@ export class ToolNode extends RunnableCallable { } } +/** + * Determines whether to route to the `tools` node or to end the graph execution. + * This function is typically used in conjunction with ToolNode to control the flow in a graph. + * + * @param state - Either an array of BaseMessage or a MessagesState object. + * @returns "tools" if there are tool calls in the last message, otherwise returns END. + * + * @example + * ```typescript + * const state = [new AIMessage({ + * content: "We need to search for information.", + * tool_calls: [{ name: "search", args: { query: "LangChain usage" }, id: "tc_1" }] + * })]; + * const result = toolsCondition(state); + * console.log(result); // "tools" + * ``` + */ export function toolsCondition( state: BaseMessage[] | typeof MessagesAnnotation.State ): "tools" | typeof END {