How to Programmatically Trigger Tool Calls and Manage Messages Using Vercel's AI SDK

I’m using Vercel’s AI SDK in my agent.ts file to handle chat interactions and execute specific tools based on user inputs. The file streams AI-generated responses and triggers tool calls like create_streamlit_app for generating Streamlit code. I need guidance on which functions or methods within the package can be used to programmatically initiate tool calls and manage message processing effectively. Any insights or examples would be greatly appreciated.

agent.ts

import { streamObject, CoreUserMessage, ToolCall, ToolResult, LanguageModelV1 } from 'ai'
import { generateCode } from './tools'
import { CSVAnalysis, StreamChunk, Tool } from './types'

// ... existing code ...

export class GruntyAgent {
    // ... existing properties and constructor ...

    async *chat(
        chatId: string,
        userId: string,
        latestMessage: string,
        tools: Tool[],
        temperature: number,
        maxTokens: number,
        csvAnalysis?: CSVAnalysis
    ): AsyncGenerator<StreamChunk> {
        // ... existing code ...

        const streamResult = await streamObject({
            model: modelClient as LanguageModelV1,
            output: 'array',
            system: this.roleDescription,
            schema: messageSchema,
            messages: sanitizedMessages,
            maxTokens: maxTokens,
            onFinish: async () => {
                await streamResult.usage.then(recordTokenUsage);
                await this.storeMessage(
                    chatId,
                    userId,
                    latestMessage,
                    accumulatedResponse.trim(),
                    this.totalTokens,
                    toolCalls,
                    toolResults
                )
            }
        })

        for await (const events of streamResult.partialObjectStream) {
            for (const event of events) {
                if (event.type === 'text') {
                    accumulatedResponse += event.content;
                    yield event as StreamChunk;

                    if (event.content.includes('create a Streamlit app')) {
                        console.log('Keyword detected for Streamlit app creation.');

                        const toolCallEvent = {
                            toolCallId: 'create-streamlit-app',
                            toolName: 'create_streamlit_app',
                            args: { query: 'Analyze CSV and create Streamlit app' }
                        } as ToolCall<string, any>;

                        toolCalls.push(toolCallEvent);
                        console.log('Tool Call Manually Triggered:', toolCallEvent);

                        try {
                            const codeQuery = `
                                ${toolCallEvent.args.query}
                                Use the following CSV analysis to inform your code:
                                ${JSON.stringify(csvAnalysis, null, 2)}
                            `;

                            const { generatedCode, codeTokenCount } = await generateCode(codeQuery);
                            this.codeTokens += codeTokenCount;

                            const toolResultEvent = {
                                toolCallId: toolCallEvent.toolCallId,
                                toolName: toolCallEvent.toolName,
                                result: generatedCode,
                                args: toolCallEvent.args
                            } as ToolResult<string, any, any>;

                            toolResults.push(toolResultEvent);
                            console.log('Tool Result Manually Triggered:', toolResultEvent);
                        } catch (error) {
                            console.error('Error generating code:', error);
                        }
                    }
                }
                // ... existing code ...
            }
        }
    }
    // ... existing methods ...
}

tools.ts

import { Anthropic } from '@anthropic-ai/sdk'
import { z } from 'zod'
import { Tool } from './types'

const codeGenerationAnthropicAgent = new Anthropic({
    apiKey: process.env.ANTHROPIC_API_KEY,
})

const streamlitAppSchema = z.object({
    query: z.string().min(1, 'Query cannot be empty').describe(
        'Explain the requirements for the Streamlit code you want to generate.'
    ),
})

export const tools: Tool[] = [
    {
        name: 'create_streamlit_app',
        description: 'Generates Python (Streamlit) code based on a given query',
        inputSchema: streamlitAppSchema,
        parameters: streamlitAppSchema,
        execute: async (input) => {
            const { query } = streamlitAppSchema.parse(input)
            return generateCode(query)
        },
    },
]

export async function generateCode(
    query: string
): Promise<{ generatedCode: string; codeTokenCount: number }> {
    if (!query || !query.trim()) {
        throw new Error('Query cannot be empty or just whitespace.')
    }

    console.log('Sending query to LLM:', query)

    try {
        const response = await codeGenerationAnthropicAgent.messages.create({
            model: 'claude-3-5-sonnet-20240620',
            max_tokens: 2000,
            system: 'You are a Python code generation assistant specializing in Streamlit apps.',
            messages: [{ role: 'user', content: query }],
        })

        if (Array.isArray(response.content) && response.content.length > 0) {
            const generatedCode =
                response.content[0].type === 'text'
                    ? response.content[0].text.replace(/^```python/, '').replace(/```$/, '')
                    : ''
            return {
                generatedCode,
                codeTokenCount: response.usage.input_tokens + response.usage.output_tokens,
            }
        } else {
            console.error('Unexpected response format:', response)
            throw new Error('Unexpected response format from code generation API')
        }
    } catch (error) {
        console.error('Error generating code:', error)
        throw new Error('Failed to generate code. Please check the query and try again.')
    }
}

These snippets illustrate how the agent.ts file handles chat interactions and triggers tool calls, while tools.ts defines the tool and its execution logic.
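
What I'm still unsure about is whether I should keep detecting keywords myself, or whether the SDK's tool() helper together with streamText and maxSteps is the intended way to let the model initiate the tool call. Something like this rough, untested sketch is what I have in mind (the model id and the inline message are just placeholders, and generateCode is my own helper from tools.ts below):

import { streamText, tool } from 'ai'
import { anthropic } from '@ai-sdk/anthropic'
import { z } from 'zod'
import { generateCode } from './tools'

const result = await streamText({
    model: anthropic('claude-3-5-sonnet-20240620'), // placeholder model
    system: 'You are a data-analysis assistant.',
    messages: [{ role: 'user', content: 'Analyze my CSV and create a Streamlit app' }],
    maxSteps: 5, // lets the SDK feed tool results back to the model automatically
    tools: {
        create_streamlit_app: tool({
            description: 'Generates Python (Streamlit) code based on a given query',
            parameters: z.object({
                query: z.string().min(1).describe('Requirements for the Streamlit code'),
            }),
            execute: async ({ query }) => {
                const { generatedCode } = await generateCode(query)
                return generatedCode
            },
        }),
    },
})

// tool-call / tool-result events show up in the full stream
for await (const part of result.fullStream) {
    if (part.type === 'tool-call') console.log('tool call:', part.toolName, part.args)
    if (part.type === 'tool-result') console.log('tool result:', part.result)
}

Is that the right direction, or is there a better-supported pattern for this?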


In the terminal, I’m seeing logs indicating that tool calls and results are being processed correctly. However, I need to understand which function I can use to render these interactions on the frontend. I’m considering using useChat, but I would appreciate expert advice on the best approach to achieve this.

Summary of Terminal Log:

The terminal log shows that a tool call was manually triggered to create a Streamlit app for CSV analysis. The query was sent to the language model, and a tool result was generated, which includes Python code for a Streamlit app. This app allows users to upload a CSV file, view data summaries, and visualize data with various plots.

Tool Call Manually Triggered: {
  toolCallId: 'create-streamlit-app',
  toolName: 'create_streamlit_app',
  args: { query: 'Analyze CSV and create Streamlit app' }
}
Sending query to LLM: Analyze CSV and create Streamlit app
Tool Result Manually Triggered: {
  toolCallId: 'create-streamlit-app',
  toolName: 'create_streamlit_app',
  result: 'import streamlit as st\nimport pandas as pd\n...st.write("Please upload a CSV file to begin analysis.")',
  args: { query: 'Analyze CSV and create Streamlit app' }
}
Prompt tokens: 656
Completion tokens: 1763
Total tokens: 2419
Message stored successfully with ID: [
  {
    id: '176e12b1-2660-46d7-08-6c9ea60c8b07',
    chat_id: '5d0e9b73-a811-0a-a7ed-11af4a4b9182',
    user_id: 'afe129c0-d6e2-77-a663-d9aa93458243',
    tool_calls: [ [Object] ],
    tool_results: [ [Object] ],
    user_message: `I've uploaded a CSV file...`,
    assistant_message: "Sure! I'll create a Streamlit app to visualize the data.", // it then puts the entire generated code into the assistant message, which isn't what I'm trying to achieve here
    token_count: 2419,
    created_at: '2024-10-22T23:38:47.145982+00:00'
  }
]

Hey!

New here, but I think using useChat might be a good idea. To render the interactions on the frontend, the function or method you'd typically use depends on how you're handling the chat data flow.

I found the closest examples over here:

To handle the specific tool calls like generating the Streamlit app, you can create a custom method in your chat logic to trigger these tool interactions when certain keywords or commands are detected.

probably something like:

if (message.includes('create a Streamlit app')) {
  const toolCall = {
    toolCallId: 'create-streamlit-app',
    toolName: 'create_streamlit_app',
    args: { query: 'any query you want to pass along' }
  };
  // ...then run the tool and push its result into the messages you render
}

When a tool call is triggered, the result can be streamed and displayed in the chat using the useChat hook.

For example, when your agent detects that a tool call should be triggered (as in your logs), the result of the tool call can be passed into the messages and rendered in the chat interface for the user.
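
As a rough, untested sketch of what I mean (I'm assuming the useChat hook from ai/react pointed at an /api/chat route that streams your agent's output; adjust the names to your setup), the tool calls and results arrive on each message as toolInvocations:

'use client'

import { useChat } from 'ai/react'

export default function Chat() {
    // useChat posts to /api/chat by default and keeps the streamed messages in state
    const { messages, input, handleInputChange, handleSubmit } = useChat({ api: '/api/chat' })

    return (
        <div>
            {messages.map((m) => (
                <div key={m.id}>
                    <strong>{m.role}:</strong> {m.content}
                    {/* any tool calls made while producing this message */}
                    {m.toolInvocations?.map((t) =>
                        t.state === 'result' && t.toolName === 'create_streamlit_app' ? (
                            <pre key={t.toolCallId}>{String(t.result)}</pre>
                        ) : (
                            <em key={t.toolCallId}>Running {t.toolName}...</em>
                        )
                    )}
                </div>
            ))}
            <form onSubmit={handleSubmit}>
                <input value={input} onChange={handleInputChange} placeholder="Say something..." />
            </form>
        </div>
    )
}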


Hey, friend!

Sorry for replying late, but I've got it working this way and it works smoothly. Thanks for the help, man!

        const stream = await streamText({
            model: this.model,
            tools: Tools,
            system: this.roleDescription,
            messages: sanitizedMessages,
            maxTokens: maxTokens,
            maxSteps: 10,
            toolChoice: 'auto',
            onStepFinish: (event) => {
                if (event.stepType === 'initial') {
                    currentMessage = event.text

                    currentContentBlock = event
                    accumulatedJson = ''
                }

                if (event.usage) {
                    this.inputTokens = event.usage.promptTokens
                }
            },
        })

        for await (const event of stream.fullStream) {
            yield event as StreamChunk

            if (event.type === 'text-delta') {
                accumulatedResponse += event.textDelta

            } else if (event.type === 'tool-call') {
                toolCalls.push({
                    toolCallId: event.toolCallId,
                    toolName: event.toolName,
                    args: event.args,
                })

                if (event.toolName === 'create_streamlit_app') {
                    try {
                        const codeQuery = `
                            ${event.args.query}
                            Use the following CSV analysis to inform your code:
                            ${JSON.stringify(csvAnalysis, null, 2)}
                        `
                        const { generatedCode, codeTokenCount } =
                            await generateCode(codeQuery)
                        this.codeTokens += codeTokenCount

                        yield {
                            type: 'generated_code',
                            content: generatedCode,
                        } as StreamChunk

                        toolResults.push({
                            toolCallId: event.toolCallId,
                            toolName: 'create_streamlit_app',
                            result: generatedCode,
                            args: {},
                        })
                    } catch (error) {
                        console.error('Error generating Streamlit code:', error)
                        yield {
                            type: 'error',
                            content: 'Error in code generation process',
                        } as StreamChunk
                    }
                }
            } else if (event.type === 'tool-call-delta') {
                // Handle streaming tool call updates
                if (event.argsTextDelta) {
                    accumulatedJson += event.argsTextDelta
                }
            } else if (event.type === 'tool-call-streaming-start') {
                // Reset accumulated JSON when a new tool call starts streaming
                accumulatedJson = ''
            } else if (event.type === 'step-finish') {
                yield {
                    type: 'text_chunk',
                    content: accumulatedResponse,
                } as StreamChunk

                // Handle step completion if needed
                if (event.usage) {
                    this.inputTokens = event.usage.promptTokens
                }
            } else if (event.type === 'finish') {
                if (event.usage) {
                    this.outputTokens = event.usage.completionTokens
                }

                console.log('Storing Message for chatId:', chatId)
                const totalTokenCount =
                    this.outputTokens + this.codeTokens + this.inputTokens
                console.log(
                    'Total Token Count (output, code, input):',
                    this.outputTokens,
                    this.codeTokens,
                    this.inputTokens
                )

                await stream.usage.then(recordTokenUsage)
                await this.storeMessage(
                    chatId,
                    userId,
                    latestMessage,
                    accumulatedResponse.trim(),
                    this.totalTokens,
                    toolCalls,
                    toolResults
                )

                // Reset all accumulators
                currentMessage = null
                accumulatedResponse = ''
                generatedCode = ''
                toolCalls = []
                toolResults = []
            } else if (event.type === 'error') {
                console.error('Stream error:', event)
                yield {
                    type: 'error',
                    content: 'An error occurred during processing',
                } as StreamChunk
            }
        }
    }
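
In case it helps anyone else: the Tools object I pass to streamText above is essentially the create_streamlit_app definition from the original tools.ts converted to the SDK's tool() helper. Roughly this shape (a simplified sketch, not my exact file):

import { tool } from 'ai'
import { z } from 'zod'

// No execute() here on purpose: the fullStream loop above calls generateCode
// manually when the 'tool-call' event arrives. Add an execute() instead if you
// want the SDK to run the tool for you.
export const Tools = {
    create_streamlit_app: tool({
        description: 'Generates Python (Streamlit) code based on a given query',
        parameters: z.object({
            query: z
                .string()
                .min(1, 'Query cannot be empty')
                .describe('Explain the requirements for the Streamlit code you want to generate.'),
        }),
    }),
}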

Hey! Apologies for the delayed response, but I'm so glad you got it working! Kudos! PS - does it not feel like an absolute delight when you solve a bug or an issue and get your code working? :star_struck:


This topic was automatically closed 30 days after the last reply. New replies are no longer allowed.