Skip to content

Instantly share code, notes, and snippets.

@Miyamura80
Created November 13, 2025 00:32
Show Gist options
  • Select an option

  • Save Miyamura80/f5c7def8ba877889a09ba5589630c9c3 to your computer and use it in GitHub Desktop.

Select an option

Save Miyamura80/f5c7def8ba877889a09ba5589630c9c3 to your computer and use it in GitHub Desktop.
2022- and 2023-Era ChatGPT Clone Implementation Comparison with DSPy, OpenAI Agents, and LangChain
import dspy
from ddgs import DDGS
from dotenv import load_dotenv
load_dotenv()

QUESTION = "What is the projected future demand for high bandwidth memory?"


def search_web(query: str) -> str:
    """Search the web with DuckDuckGo and return the top hits as text.

    Args:
        query: Free-text search query.

    Returns:
        Up to three results joined by blank lines, each formatted as
        "title: href - body".
    """
    with DDGS() as ddgs:
        results = list(ddgs.text(query, max_results=3))
    return "\n\n".join(f"{r['title']}: {r['href']} - {r['body']}" for r in results)
# 2022 Era ChatGPT, no up-to-date info
baseline_lm = dspy.LM("openai/gpt-4o-mini", temperature=0)
baseline_predictor = dspy.Predict("question -> response: str")
baseline_result = baseline_predictor(question=QUESTION, lm=baseline_lm)
print(f"Response without web search: {baseline_result.response}")

# 2023 Era ChatGPT, with web search
react_lm = dspy.LM("openai/gpt-4o-mini", temperature=0)
# ReAct lets the model interleave reasoning with calls to search_web.
react_agent = dspy.ReAct(
    "question -> response: str, relevant_sources: list[str]",
    tools=[search_web],
)
react_result = react_agent(question=QUESTION, lm=react_lm)
print(f"\nResponse with web search: {react_result.response}")
print(f"Sources (with search): {react_result.relevant_sources}")
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool
from langchain.agents import create_agent
from ddgs import DDGS
from dotenv import load_dotenv
from pydantic import BaseModel, Field
load_dotenv()

QUESTION = "What is the projected future demand for high bandwidth memory?"


@tool
def search_web(query: str) -> str:
    """Search the web for information using DuckDuckGo."""
    # NOTE(review): the docstring presumably doubles as the tool description
    # shown to the model — verify against the LangChain @tool docs.
    with DDGS() as ddgs:
        hits = list(ddgs.text(query, max_results=3))
    formatted = (f"{hit['title']}: {hit['href']} - {hit['body']}" for hit in hits)
    return "\n\n".join(formatted)
# Structured schema for the final answer: the prose response plus the source
# URLs the agent relied on. The Field descriptions are runtime data — they
# become part of the structured-output schema sent to the model.
class AgentResponse(BaseModel):
    response: str = Field(description="The answer to the question")
    relevant_sources: list[str] = Field(
        description="List of relevant source URLs used to answer the question"
    )
# 2022 Era ChatGPT, no up-to-date info
plain_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
prompt = [
    ("system", "Answer questions."),
    ("user", QUESTION),
]
plain_reply = plain_llm.invoke(prompt)
print(f"Response without web search: {plain_reply.content}")
# 2023 Era ChatGPT, with web search
tool_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
search_agent = create_agent(
    tool_llm,
    [search_web],
)
agent_state = search_agent.invoke(
    {
        "messages": [
            (
                "system",
                "Answer questions using web search when needed. Return your answer in JSON format with 'response' and 'relevant_sources' fields.",
            ),
            ("user", QUESTION),
        ]
    }
)
# Take the agent's last message and normalize it to plain text.
last_message = agent_state["messages"][-1]
raw_answer = getattr(last_message, "content", str(last_message))
# Second pass: coerce the free-form agent output into the AgentResponse schema.
extractor = tool_llm.with_structured_output(AgentResponse)
parsed = extractor.invoke(
    [
        (
            "system",
            "Extract the response and relevant sources from the agent's output.",
        ),
        ("user", f"Agent output:\n{raw_answer}"),
    ]
)
print(f"\nResponse with web search: {parsed.response}")
print(f"Sources (with search): {parsed.relevant_sources}")
# Linter will ignore these directories
IGNORE_LINT_DIRS = .venv|venv
LINE_LENGTH = 88

# Install the formatting/linting toolchain as uv-managed tools.
# --force reinstalls so repeated runs pick up new versions.
install_tools:
	@echo "$(YELLOW)🔧Installing tools...$(RESET)"
	@uv tool install black --force
	@uv tool install ruff --force
	@uv tool install ty --force
	@uv tool install vulture --force
	@echo "$(GREEN)✅Tools installed.$(RESET)"

# Format the project with Black, skipping the virtualenv directories.
fmt: install_tools
	@echo "$(YELLOW)✨Formatting project with Black...$(RESET)"
	@uv tool run black --exclude '/($(IGNORE_LINT_DIRS))/' . --line-length $(LINE_LENGTH)
from agents import Agent, Runner, function_tool, ModelSettings
from ddgs import DDGS
from dotenv import load_dotenv
from pydantic import BaseModel, Field
load_dotenv()

QUESTION = "What is the projected future demand for high bandwidth memory?"


@function_tool
def search_web(query: str) -> str:
    """Search the web for information using DuckDuckGo."""
    # The docstring also serves as the tool description registered with the
    # Agents SDK; without it the model gets an undescribed tool.
    with DDGS() as ddgs:
        results = list(ddgs.text(query, max_results=3))
    return "\n\n".join(f"{r['title']}: {r['href']} - {r['body']}" for r in results)
# Output schema the agent must produce: the answer text plus the URLs it
# used. Field descriptions are runtime strings consumed by the structured
# output machinery, not documentation — do not edit casually.
class AgentResponse(BaseModel):
    response: str = Field(description="The answer to the question")
    relevant_sources: list[str] = Field(
        description="List of relevant source URLs used to answer the question"
    )
# 2022 Era ChatGPT, no up-to-date info
baseline_agent = Agent(
    name="Assistant",
    model="gpt-4o-mini",
    model_settings=ModelSettings(temperature=0),
    instructions="Answer questions.",
)
baseline_run = Runner.run_sync(baseline_agent, QUESTION)
print(f"Response without web search: {baseline_run.final_output}")
# 2023 Era ChatGPT, with web search
search_agent = Agent(
    name="Assistant",
    model="gpt-4o-mini",
    model_settings=ModelSettings(temperature=0),
    instructions="Answer questions using web search when needed. Return your answer in JSON format with 'response' and 'relevant_sources' fields.",
    tools=[search_web],
    output_type=AgentResponse,
)
search_run = Runner.run_sync(search_agent, QUESTION)
answer = search_run.final_output
print(f"\nResponse with web search: {answer.response}")
print(f"Sources (with search): {answer.relevant_sources}")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment