## file: https://github.com/generative-computing/mellea/blob/main/docs/examples/agents/react.py#L99
def react(
m: mellea.MelleaSession,
goal: str,
react_toolbox: ReactToolbox,
budget: int = 5,
):
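    """Run a ReAct loop (Thought -> Action -> Observation) toward `goal`.

    Each turn asks the model what to do next, constrains it to name a tool from
    `react_toolbox`, collects JSON arguments for that tool, calls the tool, and
    feeds the output back into the chat context as a tool message. Returns the
    model's final answer, or None if `budget` turns pass without an answer.
    """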
assert m.ctx.is_chat_context, "ReACT requires a chat context."
test_ctx_lin = m.ctx.render_for_generation()
assert (
test_ctx_lin is not None and len(test_ctx_lin) == 0
), "ReACT expects a fresh context."
# Construct the system prompt for ReACT.
_sys_prompt = react_system_template.render(
{"today": datetime.date.today(), "tools": react_toolbox.tools}
)
# Add the system prompt and the goal to the chat history.
m.ctx.insert(mellea.stdlib.chat.Message(role="system", content=_sys_prompt))
m.ctx.insert(mellea.stdlib.chat.Message(role="user", content=f"{goal}"))
done = False
turn_num = 0
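    # Run the ReAct loop until the model reports it is done or the turn budget runs out.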
while not done:
turn_num += 1
print(f"## ReACT TURN NUMBER {turn_num}")
print(f"### Thought")
thought = m.chat(
"What should you do next? Respond with a description of the next piece of information you need or the next action you need to take."
)
print(thought.content)
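        # Action: constrain the response to a valid tool name using the toolbox's name schema.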
print("### Action")
act = m.chat(
"Choose your next action. Respond with a nothing other than a tool name.",
# model_options={mellea.backends.types.ModelOption.TOOLS: react_toolbox.tools_dict()},
format=react_toolbox.tool_name_schema(),
)
        selected_tool: ReactTool = react_toolbox.get_tool_from_schema(act.content)
print(selected_tool.get_name())
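        # Arguments: request JSON arguments that conform to the selected tool's schema.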
print(f"### Arguments for action")
act_args = m.chat(
"Choose arguments for the tool. Respond using JSON and include only the tool arguments in your response.",
format=selected_tool.args_schema(),
)
        print(f"```json\n{json.dumps(json.loads(act_args.content), indent=2)}\n```")
# TODO: handle exceptions.
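        # Observation: call the selected tool and record its output as a tool message.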
print("### Observation")
tool_output = react_toolbox.call_tool(selected_tool, act_args.content)
m.ctx.insert(
mellea.stdlib.chat.Message(role="tool", content=tool_output)
)
print(tool_output)
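        # Termination check: ask the model whether the original goal can now be answered.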
is_done = IsDoneModel.model_validate_json(
m.chat(
f"Do you know the answer to the user's original query ({goal})? If so, respond with Yes. If you need to take more actions, then respond No.",
format=IsDoneModel,
).content
).is_done
if is_done:
print("Done. Will summarize and return output now.")
done = True
return m.chat(
f"Please provide your final answer to the original query ({goal})."
).content
        elif turn_num >= budget:
            # Turn budget exhausted without an answer.
            return None
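
# Hypothetical usage sketch (not part of the source file). It assumes `my_toolbox`
# is a ReactToolbox already built from ReactTool instances, and that
# mellea.start_session() yields a session with a fresh chat context, as in other
# mellea examples.
#
#   m = mellea.start_session()
#   answer = react(m, "Which year was the transistor invented?", my_toolbox)
#   print(answer if answer is not None else "Turn budget exhausted without an answer.")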