Files
agent/examples/qwen_langgraph_react.py
2025-08-30 23:28:48 +08:00

66 lines
1.8 KiB
Python

"""
Minimal LangGraph ReAct agent using Qwen3-Coder(-Flash) via OpenAI-compatible API.
Prereqs:
pip install -U langgraph langchain langchain-openai
Env:
export QWEN_API_KEY=sk-...
export QWEN_BASE_URL=https://dashscope-intl.aliyuncs.com/compatible-mode/v1 # or your gateway
export QWEN_MODEL=qwen3-coder-flash # use console's actual model name
"""
import sys,os
from typing import Optional
from langgraph.prebuilt import create_react_agent
from langchain_core.messages import HumanMessage
try:
from langchain.tools import tool
except Exception: # older versions
from langchain_core.tools import tool # type: ignore
from langgraph_qwen import ChatQwenOpenAICompat, bind_qwen_tools
@tool
def get_time(_: str = "") -> str:
    """Return the current local time as an ISO-8601 formatted string.

    The single string parameter is ignored; it exists only because the
    tool-calling interface passes one positional argument.
    """
    from datetime import datetime

    return datetime.now().isoformat()
@tool
def add(x_and_y: str) -> str:
    """Add two integers given as 'x y'.

    Args:
        x_and_y: Whitespace-separated pair of integers, e.g. "7 35".

    Returns:
        The sum as a string, or a human-readable error message the LLM
        can use to self-correct. Previously a non-numeric token raised
        an unhandled ValueError and aborted the agent turn; now it is
        reported the same way as a wrong argument count.
    """
    try:
        xs = [int(s) for s in x_and_y.strip().split()]
    except ValueError:
        return "Please provide two integers: 'x y'"
    if len(xs) != 2:
        return "Please provide two integers: 'x y'"
    return str(xs[0] + xs[1])
def main():
    """Run a two-tool ReAct demo against a Qwen OpenAI-compatible endpoint."""
    # The custom adapter keeps the example free of the langchain_openai dependency.
    chat = ChatQwenOpenAICompat(temperature=0)
    tool_model = bind_qwen_tools(chat, [get_time, add], tool_choice="auto")
    graph = create_react_agent(tool_model, [get_time, add])

    # A single Chinese prompt crafted to require both tool calls.
    question = (
        "现在是几点?然后把 7 和 35 相加,并把两者结果合并成一句简洁中文回答。"
    )
    state = graph.invoke({"messages": [HumanMessage(content=question)]})

    # The final message in the returned state carries the agent's answer.
    final = state["messages"][-1]
    print("\n=== Agent Final Answer ===\n")
    print(getattr(final, "content", final))


if __name__ == "__main__":
    main()