Skip to content

Instantly share code, notes, and snippets.

@justinhennessy
Last active August 8, 2024 06:36
Show Gist options
  • Select an option

  • Save justinhennessy/784029ec641ad6746197196ef59a80c2 to your computer and use it in GitHub Desktop.

Select an option

Save justinhennessy/784029ec641ad6746197196ef59a80c2 to your computer and use it in GitHub Desktop.
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain.memory import ConversationBufferMemory
# Initialize the chat model.
# NOTE(review): hard-coded API key — load from the OPENAI_API_KEY environment
# variable instead of committing a secret.
llm = ChatOpenAI(api_key="xxx")

# Prompt template: prior turns are injected under "history",
# the new user message under "input".
prompt = ChatPromptTemplate.from_messages([
    MessagesPlaceholder(variable_name="history"),
    ("human", "{input}"),
])

# Compose prompt -> model into a single runnable chain.
chain = prompt | llm

# One in-process memory store; every session_id maps to this same history.
conversation_memory = ConversationBufferMemory()

# Wrap the chain so history is loaded before, and appended after, each call.
# FIX: get_session_history is invoked with a session_id argument, so the
# callable must accept one (a zero-arg lambda raises TypeError).
# FIX: dropped verbose=True — not a RunnableWithMessageHistory parameter.
conversation_chain = RunnableWithMessageHistory(
    chain,
    get_session_history=lambda session_id: conversation_memory.chat_memory,
    input_messages_key="input",
    history_messages_key="history",
)

# Simple REPL: type 'exit' to quit.
if __name__ == "__main__":
    while True:
        user_input = input("\n\nUser: ")
        if user_input.lower() == "exit":
            break
        # FIX: RunnableWithMessageHistory requires a session_id in the config
        # so it knows which history to read/write; without it invoke() errors.
        result = conversation_chain.invoke(
            {"input": user_input},
            config={"configurable": {"session_id": "default"}},
        )
        print("AI:", result.content)
        print(f"\n\n******\n{conversation_memory.chat_memory}\n******")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment