"""
Streaming example with LangGraph + Qwen. Prints incremental model tokens and
tool call events.

Prereqs:
    pip install -U langgraph langchain langchain-openai

Env:
    export QWEN_API_KEY=sk-...
    export QWEN_BASE_URL=https://dashscope-intl.aliyuncs.com/compatible-mode/v1
    export QWEN_MODEL=qwen3-coder-flash
"""
|
|
|
|
from langgraph.prebuilt import create_react_agent
from langchain_core.messages import HumanMessage

try:
    from langchain.tools import tool
except Exception:  # older versions
    from langchain_core.tools import tool  # type: ignore

from langgraph_qwen import ChatQwenOpenAICompat, bind_qwen_tools
|
|
|
|
|
|
@tool
def calc(expr: str) -> str:
    """Evaluate a simple arithmetic expression and return the result as text.

    Only literal arithmetic plus a small whitelist of math helpers
    (``sqrt``, ``sin``, ``cos``) is available to the expression.  Any
    failure is reported as an ``"error: ..."`` string instead of raising,
    so the agent loop always receives a tool result.
    """
    import math

    # NOTE(security): eval() with an emptied __builtins__ is NOT a real
    # sandbox -- dunder chains such as ().__class__ can still reach
    # arbitrary objects.  Reject double underscores before evaluating so
    # the "safe" claim actually holds for this example.
    if "__" in expr:
        return "error: disallowed expression"
    allowed = {k: getattr(math, k) for k in ["sqrt", "sin", "cos"]}
    try:
        return str(eval(expr, {"__builtins__": {}}, allowed))
    except Exception as e:
        # Surface the failure to the model rather than crashing the agent.
        return f"error: {e}"
|
|
|
|
|
|
def main():
    """Build a Qwen-backed ReAct agent and stream its replies to stdout."""
    llm = ChatQwenOpenAICompat(temperature=0)
    tooled = bind_qwen_tools(llm, [calc], tool_choice="auto")
    agent = create_react_agent(tooled, [calc])

    state = {"messages": [HumanMessage(content="先说一句你好,然后计算 sqrt(144)。")]}

    print("Streaming events (tokens and tool calls):\n")
    for snapshot in agent.stream(state, stream_mode="values"):
        # Each snapshot is the partial graph state; inspect its newest message.
        latest = snapshot["messages"][-1]
        speaker = getattr(latest, "type", getattr(latest, "role", ""))
        if speaker not in ("ai", "assistant"):
            # Skip anything that is not incremental assistant output.
            continue
        print(getattr(latest, "content", ""), end="", flush=True)

    print("\n\nDone.")
|
|
|
|
|
|
# Script entry point: run the streaming demo when executed directly.
if __name__ == "__main__":
    main()
|