Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Select an option

  • Save jeffmylife/6b9e4b39439b8e0053b5674ca13cd4ff to your computer and use it in GitHub Desktop.

Select an option

Save jeffmylife/6b9e4b39439b8e0053b5674ca13cd4ff to your computer and use it in GitHub Desktop.
LangChain with Tools and Chat History Guide

LangChain with Tools and Chat History: A Guide

Welcome to this guide on using LangChain with tools and chat history management. This guide will explain how to set up and use tools with LangChain, manage chat history, and handle tool interactions within your conversation flow.

Table of Contents

  1. Overview
  2. Setting Up Your Environment
  3. Tools Overview
  4. Handling Chat History
  5. Message Flow with Tools

1. Overview

In this guide, we'll cover:

• Using LangChain's message-based pattern for building chat applications
• Setting up and using tools with LangChain
• Managing chat history
• Processing tool calls and their results


2. Setting Up Your Environment

Before you begin, make sure you have:

• Required dependencies:
– langchain
– langchain_core
– Your chosen LLM provider's library (e.g., langchain_anthropic, langchain_openai)

pip install langchain langchain-core langchain-anthropic langchain-openai

3. Tools Overview

Tools in LangChain are defined using the Tool class. Here's how to create tools:

from langchain.tools import Tool
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
from langchain_core.messages import (
    SystemMessage,
    HumanMessage,
    AIMessage,
    ToolMessage
)

def search(query: str) -> str:
    """Mock search tool; swap in a real search backend here."""
    return "Results for: " + query

def write_file(input_dict: dict) -> str:
    """Mock file writer; swap in real file-writing logic here.

    Expects `input_dict` to contain a 'file_path' key (raises KeyError otherwise).
    """
    path = input_dict["file_path"]
    return f"Wrote to {path}"

def read_file(file_path: str) -> str:
    """Mock file reader; swap in real file-reading logic here."""
    return "Content from " + file_path

# Tool registry handed to the LLM via `bind_tools`. Each Tool wraps one of the
# mock functions above; the `description` is what the model reads when deciding
# whether (and how) to call the tool, so keep it specific about expected input.
tools = [
    Tool(
        name="Search",
        func=search,
        description="useful for searching the internet to find answers to questions"
    ),
    # Note: Tool funcs conventionally take a single string argument; WriteFile
    # takes a dict here, so its description spells out the expected keys.
    Tool(
        name="WriteFile",
        func=write_file,
        description="Write content to a file. Input should be a dictionary with 'file_path' and 'text' keys."
    ),
    Tool(
        name="ReadFile",
        func=read_file,
        description="Read content from a file. Input should be a file path."
    )
]

To use tools with your LLM:

# For Anthropic's Claude
llm = ChatAnthropic(model_name="claude-3-sonnet-20240229")
# Or for OpenAI's GPT models
# llm = ChatOpenAI(model_name="gpt-4")
# bind_tools attaches the tool schemas to every request so the model can
# emit structured tool calls; the returned runnable is used in place of `llm`.
llm_with_tools = llm.bind_tools(tools)

4. Handling Chat History

Message Types

LangChain uses different message types for chat history:

  • SystemMessage: Initial instructions/context
  • HumanMessage: User inputs
  • AIMessage: Model responses
  • ToolMessage: Tool execution results

Chat History Structure

from langchain_core.messages import (
    SystemMessage,
    HumanMessage,
    AIMessage,
    ToolMessage
)

# Example transcript showing the full message sequence for one tool round-trip:
# system → human → assistant → human → assistant(tool call) → tool result →
# assistant(final answer).
messages = [
    SystemMessage(content="You are a helpful assistant that can use tools."),
    HumanMessage(content="Hello"),
    AIMessage(content="Hi, how can I help?"),
    HumanMessage(content="Search for 'LangChain'"),
    # An assistant turn that only requests a tool: empty content, with the
    # call details carried in additional_kwargs["tool_calls"].
    AIMessage(content="", additional_kwargs={"tool_calls": [{
        "id": "call_1",
        "type": "function",
        "function": {
            "name": "Search",
            "arguments": "{}"
        }
    }]}),
    # The tool's result; tool_call_id must match the id of the call above so
    # the model can pair request and response.
    ToolMessage(
        content="Results for: LangChain",
        tool_call_id="call_1",
        name="Search"
    ),
    AIMessage(content="Here are the results I found...")
]

Getting Chat History

To retrieve chat history in a user-friendly format:

def get_chat_history(messages):
    """Return chat history as a list of display dicts.

    Skips the leading system message and assistant messages that exist only
    to carry tool calls (empty content). Each entry has "role", "content",
    and "sender" keys; tool results additionally get "tool_name".
    """
    history = []
    for msg in messages[1:]:  # messages[0] is assumed to be the SystemMessage
        # Skip empty AI messages (those only containing tool calls)
        if isinstance(msg, AIMessage) and not msg.content and hasattr(msg, 'tool_calls'):
            continue

        message_dict = {
            "role": msg.type,
            "content": msg.content or "",
            "sender": "human" if isinstance(msg, HumanMessage) else (
                "assistant" if isinstance(msg, AIMessage) else "tool"
            )
        }
        if isinstance(msg, ToolMessage):
            # BUG FIX: ToolMessage stores the tool's name in the `name`
            # attribute (see ToolMessage(..., name="Search") above); the old
            # lookup of a nonexistent `tool_name` attribute always returned
            # "unknown".
            message_dict["tool_name"] = getattr(msg, "name", "unknown")

        # Drop whitespace-only entries so the UI never shows blank bubbles.
        if message_dict["content"].strip():
            history.append(message_dict)
    return history

Setting Chat History

To load existing chat history:

def set_chat_history(messages_list, system_prompt="You are a helpful assistant that can use tools."):
    """Load chat history from a list of message dictionaries.

    Rebuilds LangChain message objects from dicts carrying "role" and
    "content" keys (tool entries may also carry "tool_name"). A "tool" entry
    is expected to directly follow the "assistant" entry that triggered it;
    for that pair this emits the three-message pattern the providers expect:
    empty AIMessage with a tool call, then the ToolMessage, then (if the
    assistant dict had text) the final AIMessage. Returns the reconstructed
    list, starting with a fresh SystemMessage.
    """
    messages = [SystemMessage(content=system_prompt)]
    # Used to synthesize unique, matching tool_call_id values ("call_0", ...)
    # since the original provider-generated ids are not stored in the dicts.
    tool_message_counter = 0
    
    # Manual index loop (not a for) because a handled tool message must be
    # skipped with an extra i += 1 below.
    i = 0
    while i < len(messages_list):
        msg = messages_list[i]
        role = msg["role"].lower()
        content = msg["content"]
        
        if role == "human":
            messages.append(HumanMessage(content=content))
        elif role == "assistant":
            # Check if next message is a tool message
            if (i + 1 < len(messages_list) and 
                messages_list[i + 1]["role"].lower() == "tool"):
                # Get tool info from next message
                tool_msg = messages_list[i + 1]
                tool_name = tool_msg.get("tool_name", "unknown")
                tool_call_id = f"call_{tool_message_counter}"
                
                # Add AI message with tool use
                # NOTE(review): arguments are rebuilt as "{}" because the
                # original call args are not preserved in the history dicts.
                messages.append(AIMessage(
                    content="",
                    additional_kwargs={"tool_calls": [{
                        "id": tool_call_id,
                        "type": "function",
                        "function": {
                            "name": tool_name,
                            "arguments": "{}"
                        }
                    }]}
                ))
                
                # Add tool result message (same synthesized id links the pair)
                messages.append(ToolMessage(
                    content=tool_msg["content"],
                    tool_call_id=tool_call_id,
                    name=tool_name
                ))
                
                # Add AI response if it has content
                if content.strip():
                    messages.append(AIMessage(content=content))
                
                tool_message_counter += 1
                i += 1  # Skip the next message (tool) since we've handled it
            else:
                # Regular assistant message
                messages.append(AIMessage(content=content))
        
        i += 1
    
    return messages

5. Message Flow with Tools

Here's how to handle a new message with potential tool usage:

async def handle_message(messages, llm_with_tools, user_input):
    """Process a user message and return the assistant's response text.

    Appends the user turn to `messages` (mutated in place), invokes the
    tool-bound LLM, executes any requested tools from the module-level
    `tools` registry, re-invokes the LLM with the tool results, and appends
    the final assistant reply. On any failure, returns an error string
    instead of raising, so the conversation loop keeps running.
    """
    import json  # local import: only needed to serialize tool arguments

    # Add user message
    messages.append(HumanMessage(content=user_input))

    try:
        # Get initial response from LLM
        ai_msg = await llm_with_tools.ainvoke(messages)

        # Check if the AI wants to use tools
        if hasattr(ai_msg, 'tool_calls') and ai_msg.tool_calls:
            # Record the tool request in history.
            # BUG FIX: serialize the model's actual arguments instead of the
            # previous hard-coded "{}", so saved/reloaded history reflects
            # what the model really asked for.
            messages.append(AIMessage(
                content="",
                additional_kwargs={"tool_calls": [{
                    "id": tool_call["id"],
                    "type": "function",
                    "function": {
                        "name": tool_call["name"],
                        "arguments": json.dumps(tool_call.get("args", {}))
                    }
                } for tool_call in ai_msg.tool_calls]}
            ))

            # Process each tool call
            for tool_call in ai_msg.tool_calls:
                tool_name = tool_call["name"]
                tool_call_id = tool_call["id"]

                # Find the tool by name (case-insensitive); silently skip
                # unknown tool names, preserving the original best-effort style.
                tool = next((t for t in tools if t.name.lower() == tool_name.lower()), None)
                if tool:
                    # Single-input Tools receive their string arg as '__arg1'
                    args = tool_call.get('args', {})
                    query = args.get('__arg1', '')
                    result = tool.invoke(query)

                    # Add tool result, linked back to the call via its id
                    messages.append(ToolMessage(
                        content=str(result),
                        tool_call_id=tool_call_id,
                        name=tool_name
                    ))

            # Get final response after tool usage
            final_response = await llm_with_tools.ainvoke(messages)
            content = final_response.content
        else:
            # If no tool calls, use the direct response
            content = ai_msg.content

        # Add final response to history
        messages.append(AIMessage(content=content))
        return content

    except Exception as e:
        # Broad catch is deliberate: any provider/tool failure becomes a
        # user-visible reply rather than crashing the chat loop.
        return f"Error processing message: {str(e)}"

Best Practices

• Always include a system message to define the assistant's behavior and available tools
• When using tools, ensure the model knows they're available via the system message
• After a tool call, re-invoke the LLM to incorporate the tool's result
• When loading chat history, watch for the pattern: empty assistant message → tool message → final assistant message
• Keep track of tool call IDs to properly link tool calls with their results

from langchain.tools import Tool
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
from langchain_core.messages import (
SystemMessage,
HumanMessage,
AIMessage,
ToolMessage
)
# Define a simple test tool
def test_search(query: str) -> str:
"""Simple mock search function"""
print(f"\n🔍 Tool 'Search' was called with query: {query}")
result = f"Mock search results for: {query}"
print(f"🔍 Tool returned: {result}")
return result
# Single mock tool exposed to both models under test.
tools = [
    Tool(
        name="Search",
        func=test_search,
        description="useful for searching to find answers to questions",
    ),
]
async def run_test(llm, llm_name):
    """Drive one tool-using conversation against `llm`, printing each step.

    Binds the module-level `tools`, sends a message that should trigger the
    Search tool, feeds the mock result back, and prints the final reply and
    a cleaned-up chat history. `llm_name` is only used for display.
    """
    print(f"\nTesting with {llm_name}:")
    print("-" * 50)
    print("1️⃣ Binding tools to LLM...")
    llm_with_tools = llm.bind_tools(tools)
    print("2️⃣ Initializing conversation...")
    # Initialize messages with system prompt
    messages = [
        SystemMessage(content="You are a helpful assistant that can use tools.")
    ]
    # Simulate a conversation with tool usage
    user_message = "Search for information about LangChain"
    print(f"3️⃣ Sending user message: '{user_message}'")
    messages.append(HumanMessage(content=user_message))
    # Get response from LLM
    print("4️⃣ Getting initial LLM response...")
    ai_msg = await llm_with_tools.ainvoke(messages)
    # Check if tool usage is requested
    if hasattr(ai_msg, 'tool_calls') and ai_msg.tool_calls:
        print("5️⃣ LLM requested to use tools!")
        # BUG FIX: reuse the id the model generated instead of a hard-coded
        # "call_1" — providers match tool results to calls by this id and may
        # reject a mismatched ToolMessage.
        tool_call_id = ai_msg.tool_calls[0]["id"]
        # Add AI message with tool call
        messages.append(AIMessage(
            content="",
            additional_kwargs={"tool_calls": [{
                "id": tool_call_id,
                "type": "function",
                "function": {
                    "name": "Search",
                    "arguments": "{}"
                }
            }]}
        ))
        print("6️⃣ Executing tool call...")
        # Add tool result
        messages.append(ToolMessage(
            content=test_search("LangChain"),
            tool_call_id=tool_call_id,
            name="Search"
        ))
        print("7️⃣ Getting final LLM response...")
        # Get final response
        final_response = await llm_with_tools.ainvoke(messages)
        print("\n🤖 Final response:", final_response.content)
    else:
        print("\n❌ LLM did not request to use tools")
        print("🤖 Direct response:", ai_msg.content)

    # Test chat history retrieval
    def get_chat_history(messages):
        """Return display history, skipping system and tool-call-only messages."""
        history = []
        for msg in messages[1:]:  # Skip system message
            if isinstance(msg, AIMessage) and not msg.content and hasattr(msg, 'tool_calls'):
                continue
            message_dict = {
                "role": msg.type,
                "content": msg.content or "",
                "sender": "human" if isinstance(msg, HumanMessage) else (
                    "assistant" if isinstance(msg, AIMessage) else "tool"
                )
            }
            if isinstance(msg, ToolMessage):
                message_dict["tool_name"] = getattr(msg, "name", "unknown")
            if message_dict["content"].strip():
                history.append(message_dict)
        return history

    print("\n📝 Chat History:")
    for msg in get_chat_history(messages):
        print(f"{msg['sender']}: {msg['content']}")
async def main():
    """Run the tool-calling smoke test against both providers in turn.

    Requires ANTHROPIC_API_KEY and OPENAI_API_KEY to be set in the
    environment; each run makes live API calls.
    """
    # Test with Claude
    claude = ChatAnthropic(model_name="claude-3-5-sonnet-20241022")
    await run_test(claude, "Claude")
    # Test with GPT-4
    gpt4 = ChatOpenAI(model_name="gpt-4o-mini")
    await run_test(gpt4, "GPT-4")


if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment