# This script demonstrates the difference in behavior when using tools with
# streaming vs regular calls.
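#
# To run this as a standalone script you need the req_llm Hex package; the
# version constraint below is an assumption — adjust it to the release you use.
Mix.install([{:req_llm, "~> 1.0"}])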
alias ReqLLM.Context

{:ok, planet_weather_tool} =
  ReqLLM.Tool.new(
    name: "planet_weather",
    description: "Gets weather on a planet",
    parameter_schema: [planet: [type: :string, required: true]],
    callback: fn args ->
      case args do
        %{planet: "Mars"} -> {:ok, "dry and cold"}
        %{planet: "Mercury"} -> {:ok, "dry and warm"}
        %{planet: "Earth"} -> {:ok, "just perfect"}
        _ -> {:ok, "humans can't survive there"}
      end
    end
  )
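
# Sanity-check the tool outside of a model call. This sketch assumes your
# ReqLLM version exposes ReqLLM.Tool.execute/2; if not, invoke the callback
# directly with a map of arguments.
#
#     ReqLLM.Tool.execute(planet_weather_tool, %{planet: "Mars"})
#     #=> {:ok, "dry and cold"}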
defmodule LlmTools do
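  # Runs a single model call, streaming or not. When the model stops to request
  # tool calls, executes them, appends the results to the context, and recurses
  # until the model produces a final text answer.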
  def generate_text(model, context, tools, opts) do
    streaming? = Keyword.get(opts, :streaming?, false)

    result =
      if streaming? do
        {:ok, stream_response} = ReqLLM.stream_text(model, context, tools: tools)
        ReqLLM.StreamResponse.process_stream(stream_response)
      else
        ReqLLM.generate_text(model, context, tools: tools)
      end

    case result do
      {:ok, %ReqLLM.Response{finish_reason: :tool_calls} = response} ->
        tool_calls = ReqLLM.Response.tool_calls(response)
        IO.inspect(tool_calls, label: "Tool calls")
        new_context = Context.execute_and_append_tools(response.context, tool_calls, tools)
        generate_text(model, new_context, tools, opts)

      {:ok, %ReqLLM.Response{finish_reason: :stop} = response} ->
        IO.puts(ReqLLM.Response.text(response))
        {:ok, response}

      non_tool_call ->
        non_tool_call
    end
  end
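
  # Feeds the questions to the model one at a time, threading the accumulated
  # conversation context (including tool results) through each turn.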
  def generate_dialogue(model, questions, tools, opts \\ []) do
    initial_context =
      Context.new([
        Context.system(
          "When asked about the weather on different planets, only use tool calls and don't use any of your own knowledge"
        )
      ])

    questions
    |> Enum.reduce(initial_context, fn question, context ->
      new_context = Context.append(context, Context.user(question))
      {:ok, response} = generate_text(model, new_context, tools, opts)
      response.context
    end)
  end
end
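
# A single-turn usage sketch (same API as above; the model name is just an
# example):
#
#     context = Context.new([Context.user("What is the weather on Mars?")])
#     {:ok, _response} =
#       LlmTools.generate_text("xai:grok-4-1-fast", context, [planet_weather_tool],
#         streaming?: true
#       )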
streaming? = System.get_env("STREAMING", nil)
model = System.get_env("MODEL", "xai:grok-4-1-fast")
LlmTools.generate_dialogue(
model,
[
"What is the weather on Mars?",
"And what is the weather on Mercury?",
"How about Earth and Moon?"
],
[planet_weather_tool],
streaming?: streaming?
)