Created
February 27, 2026 00:50
-
-
Save donbr/64acaf4cca142f31d96ec03a3534b7ca to your computer and use it in GitHub Desktop.
amusing code that doesn't work
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import os | |
| import requests | |
| import asyncio | |
| import nest_asyncio | |
| from typing import Annotated, Literal | |
| from typing_extensions import TypedDict | |
| from dotenv import load_dotenv | |
| # LangChain & LangGraph Imports | |
| from langchain_openai import ChatOpenAI | |
| from langchain_core.tools import tool | |
| from langchain_core.messages import HumanMessage, SystemMessage, BaseMessage | |
| from langchain_mcp_adapters.client import MultiServerMCPClient | |
| from langgraph.graph import StateGraph, START, END | |
| from langgraph.graph.message import add_messages | |
| from langgraph.prebuilt import ToolNode | |
| from langgraph.checkpoint.memory import MemorySaver | |
# Patch asyncio to allow nesting (critical for Jupyter/Canvas environments)
nest_asyncio.apply()

# --- 1. ENVIRONMENT SETUP ---
load_dotenv()

# Credentials loaded from .env; all three are validated in the __main__ guard
# before main() runs.
BEARER_TOKEN = os.environ.get("X_BEARER_TOKEN")  # X/Twitter API v2 bearer token
GITHUB_PAT = os.environ.get("GITHUB_PAT")  # GitHub PAT for the Copilot MCP endpoint
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")  # read implicitly by ChatOpenAI

# Initialize LLM (GPT-4o-mini is standard for AIE9 Session 12)
# temperature=0 keeps tool-selection behavior as deterministic as possible.
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
| # --- 2. X API TOOLS (Activity #1 Implementation) --- | |
@tool
def search_recent_posts(query: str, max_results: int = 20) -> str:
    """Search recent X/Twitter posts using the v2 API.

    Returns posts from the last 7 days matching the query.

    Args:
        query: X API v2 search query (e.g. "from:user -is:retweet").
        max_results: Desired number of posts; clamped to the API's 10-100 range.

    Returns:
        Newline-separated "[YYYY-MM-DD] text" lines, or "No posts found." when
        the response contains no data.

    Raises:
        requests.HTTPError: On a non-2xx API response.
        requests.Timeout: If the API does not respond within 30 seconds.
    """
    url = "https://api.x.com/2/tweets/search/recent"
    headers = {"Authorization": f"Bearer {BEARER_TOKEN}"}
    params = {
        "query": query,
        # X API v2 requires 10 <= max_results <= 100; clamp rather than error.
        "max_results": min(max(max_results, 10), 100),
        "tweet.fields": "created_at,public_metrics,author_id,text",
        "expansions": "author_id",
        "user.fields": "name,username",
    }
    # FIX: add a timeout so a stalled connection cannot hang the agent forever.
    response = requests.get(url, headers=headers, params=params, timeout=30)
    response.raise_for_status()
    data = response.json()
    tweets = data.get("data", [])
    if not tweets:
        return "No posts found."
    results = []
    for t in tweets:
        # FIX: use .get("text", "") so a malformed entry cannot raise KeyError.
        results.append(f"[{t.get('created_at', '')[:10]}] {t.get('text', '')}")
    return "\n".join(results)
@tool
def get_user_posts(username: str, max_results: int = 20) -> str:
    """Get recent original posts (no retweets) from a specific user handle.

    Args:
        username: X handle, with or without a leading "@".
        max_results: Desired number of posts (clamped downstream to 10-100).

    Returns:
        The formatted post list produced by search_recent_posts.
    """
    # FIX: tolerate handles given as "@name" — the from: operator needs the
    # bare username, and the LLM frequently passes the "@" through.
    handle = username.lstrip("@")
    query = f"from:{handle} -is:retweet"
    return search_recent_posts.invoke({"query": query, "max_results": max_results})
@tool
def get_user_profile(username: str) -> str:
    """Activity #1: Retrieve public profile information for an X user.

    Returns name, bio, followers, following, post count, and creation date.

    Args:
        username: X handle, with or without a leading "@".

    Returns:
        A multi-line summary of the profile, or a "not found" message when the
        API returns no data for the handle.

    Raises:
        requests.HTTPError: On a non-2xx API response.
        requests.Timeout: If the API does not respond within 30 seconds.
    """
    # FIX: strip a leading "@" so the lookup path is a bare username,
    # consistent with get_user_posts.
    handle = username.lstrip("@")
    url = f"https://api.x.com/2/users/by/username/{handle}"
    headers = {"Authorization": f"Bearer {BEARER_TOKEN}"}
    params = {"user.fields": "description,public_metrics,created_at,name,username"}
    # FIX: add a timeout so a stalled connection cannot hang the agent forever.
    response = requests.get(url, headers=headers, params=params, timeout=30)
    response.raise_for_status()
    # X API v2 signals "no such user" via a missing "data" key, not a 404.
    user = response.json().get("data")
    if not user:
        return f"User {username} not found."
    m = user.get("public_metrics", {})
    return (
        f"Handle: @{user['username']}\nName: {user['name']}\nBio: {user['description']}\n"
        f"Followers: {m.get('followers_count')}\nFollowing: {m.get('following_count')}\n"
        f"Tweets: {m.get('tweet_count')}\nCreated: {user['created_at'][:10]}"
    )
| # --- 3. LANGGRAPH AGENT ARCHITECTURE --- | |
class AgentState(TypedDict):
    """LangGraph state schema: a message history merged via add_messages.

    The add_messages reducer appends new messages to the existing list
    (replacing entries with matching IDs) rather than overwriting the state,
    which is what lets the agent/tools loop accumulate a conversation.
    """

    # Full conversation: human, AI (including tool calls), and tool messages.
    messages: Annotated[list[BaseMessage], add_messages]
def build_agent(tools):
    """Build and compile a tool-calling agent graph with conversational memory.

    The compiled graph loops between an LLM node ("agent") and a ToolNode
    ("tools") until the model stops requesting tools, then ends. A MemorySaver
    checkpointer persists the message history per thread_id.

    Args:
        tools: Combined list of local X tools and remote GitHub MCP tools.

    Returns:
        The compiled LangGraph application.
    """
    # Behavior and tool-usage rules injected ahead of every model call.
    SYSTEM_PROMPT = """You are a GitHub and Social Media Agent.
1. Use X tools to find and profile users.
2. Use GitHub MCP tools to create repos, branches, and PRs.
3. Maintain a professional tone.
4. When comparing users, use your conversational memory to reference previous results."""
    model = llm.bind_tools(tools)

    def call_model(state: AgentState):
        # Prepend the system prompt to the accumulated conversation.
        prompt = [SystemMessage(content=SYSTEM_PROMPT), *state["messages"]]
        return {"messages": [model.invoke(prompt)]}

    def route_after_agent(state: AgentState):
        # Keep looping through the tool node while the model requests tools.
        last = state["messages"][-1]
        return "tools" if last.tool_calls else END

    graph = StateGraph(AgentState)
    graph.add_node("agent", call_model)
    graph.add_node("tools", ToolNode(tools))
    graph.add_edge(START, "agent")
    graph.add_conditional_edges("agent", route_after_agent)
    graph.add_edge("tools", "agent")
    # MemorySaver gives per-thread memory across separate ainvoke calls.
    return graph.compile(checkpointer=MemorySaver())
| # --- 4. EXECUTION WORKFLOW (Activity #2 Implementation) --- | |
async def main():
    """Run Activities #1 and #2 end to end against the compiled agent."""
    # FIX: Initialize MultiServerMCPClient as an object, not a context manager.
    mcp_client = MultiServerMCPClient(
        {
            "github": {
                "transport": "http",
                "url": "https://api.githubcopilot.com/mcp/",
                "headers": {"Authorization": f"Bearer {GITHUB_PAT}"},
            }
        }
    )
    # Discover the remote GitHub tools, then combine them with the local X tools.
    github_tools = await mcp_client.get_tools()
    local_tools = [search_recent_posts, get_user_posts, get_user_profile]
    agent = build_agent(local_tools + github_tools)

    # A single thread_id ties every invocation below into one remembered session.
    config = {"configurable": {"thread_id": "mcp_session_12"}}

    print("--- Activity #1: Testing User Profile Tool ---")
    profile_result = await agent.ainvoke(
        {
            "messages": [
                HumanMessage(content="Get the profile of @llm_wizard and check his bio.")
            ]
        },
        config,
    )
    print(profile_result["messages"][-1].content)

    print("\n--- Activity #2: Multi-Account Comparison & GitHub Workflow ---")
    # Step A: seed the session memory with posts from the second account.
    await agent.ainvoke(
        {"messages": [HumanMessage(content="Get recent posts from @karpathy.")]}, config
    )
    # Step B: comparison plus the full GitHub automation chain.
    automation_query = (
        "1. Create a markdown comparison between @llm_wizard and @karpathy. "
        "2. Create a repo 'x-comparison-2026'. "
        "3. Create a branch 'add-comparison'. "
        "4. Commit 'comparison.md' to that branch and open a PR to main."
    )
    final_result = await agent.ainvoke(
        {"messages": [HumanMessage(content=automation_query)]}, config
    )
    print(final_result["messages"][-1].content)
if __name__ == "__main__":
    # Refuse to start unless every required credential was loaded from .env.
    if all([BEARER_TOKEN, GITHUB_PAT, OPENAI_API_KEY]):
        asyncio.run(main())
    else:
        print("Error: Missing environment variables in .env")
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment