import os
from typing import TypedDict, Annotated
from dotenv import load_dotenv
from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.graph import START, StateGraph
from langchain_openai import AzureChatOpenAI
from retriever import guest_info_tool
from tools import weather_info_tool, hub_stats_tool, news_search_tool
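# Load environment variables from a local .env file; the Azure OpenAI endpoint,
# API key, deployment name, and API version are read from it below.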
load_dotenv()
chat = AzureChatOpenAI(
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    deployment_name=os.getenv("DEPLOYMENT_NAME"),
    openai_api_version=os.getenv("OPENAI_API_VERSION"),
    temperature=0.75,
    streaming=True,
    verbose=True,
)
tools = [guest_info_tool, weather_info_tool, hub_stats_tool, news_search_tool]
chat_with_tools = chat.bind_tools(tools)
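# bind_tools registers the tool schemas with the model, so its replies can carry
# structured tool_calls that the graph routes on below.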
class AgentState(TypedDict):
    # add_messages is a reducer: new messages are appended to the history
    # instead of replacing it on every state update.
    messages: Annotated[list[AnyMessage], add_messages]

def assistant(state: AgentState):
    # Ask the tool-aware model for the next reply given the conversation so far.
    return {
        "messages": [chat_with_tools.invoke(state["messages"])],
    }
# The graph
builder = StateGraph(AgentState)
# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
# If the latest message requires a tool, route to tools
# Otherwise, provide a direct response
tools_condition,
)
builder.add_edge("tools", "assistant")
# Compile with debug mode for verbosity
alfred = builder.compile(debug=True)
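# Optional sketch (not part of the original script): the compiled graph can be
# rendered to a PNG via LangGraph's built-in Mermaid export, e.g. for a README.
# with open("alfred_graph.png", "wb") as f:
#     f.write(alfred.get_graph().draw_mermaid_png())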
messages = [HumanMessage(
    content="One of our guests is from Qwen. What can you tell me about their most recent popular AI model (search for it)?"
)]
print("๐ Starting Agent Execution...")
print("="*50)
# Use stream instead of invoke to see step-by-step execution
final_messages = None
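# Each item yielded by stream() is a dict keyed by the node that just ran,
# with that node's state update as the value.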
for step in alfred.stream({"messages": messages}):
    print(f"📍 Current Step: {list(step.keys())}")
    for node_name, node_output in step.items():
        print(f"🔧 Node '{node_name}' output:")
        if 'messages' in node_output:
            latest_message = node_output['messages'][-1]
            # Keep track of final messages
            final_messages = node_output['messages']
            print(f" Type: {type(latest_message).__name__}")
            if hasattr(latest_message, 'content'):
                print(f" Content: {latest_message.content[:200]}...")
            if hasattr(latest_message, 'tool_calls') and latest_message.tool_calls:
                print(f" Tool Calls: {latest_message.tool_calls}")
    print("-" * 30)
print("\n"*3)
print("="*50)
print("๐ฉ Alfred's Final Response:")
if final_messages:
print(final_messages[-1].content)