forked from lingyuzeng/agent
langgraph basic use in tool inject
This commit is contained in:
46
examples/qwen_langgraph_custom_model.py
Normal file
46
examples/qwen_langgraph_custom_model.py
Normal file
@@ -0,0 +1,46 @@
|
||||
"""
Use the custom BaseChatModel-like class ChatQwenOpenAICompat with LangGraph.

Prereqs:
    pip install -U langgraph langchain httpx

Env:
    export QWEN_API_KEY=sk-...
    export QWEN_BASE_URL=https://dashscope-intl.aliyuncs.com/compatible-mode/v1
    export QWEN_MODEL=qwen3-coder-flash
"""
|
||||
|
||||
from langgraph.prebuilt import create_react_agent
|
||||
from langchain_core.messages import HumanMessage
|
||||
|
||||
try:
|
||||
from langchain.tools import tool
|
||||
except Exception:
|
||||
from langchain_core.tools import tool # type: ignore
|
||||
|
||||
from langgraph_qwen import ChatQwenOpenAICompat
|
||||
|
||||
|
||||
@tool
def multiply(x_and_y: str) -> str:
    """Multiply two integers given as 'x y'."""
    tokens = x_and_y.strip().split()
    try:
        # Unpacking fails (ValueError) unless exactly two integer tokens
        # are present, so one except covers both parse and arity errors.
        first, second = (int(tok) for tok in tokens)
    except Exception:
        return "Please provide two integers: 'x y'"
    return str(first * second)
|
||||
|
||||
|
||||
def main():
    """Build a ReAct agent around the custom Qwen chat model and run one query."""
    chat = ChatQwenOpenAICompat(temperature=0)
    # Attach the tool schema and let the model decide when to call it.
    tooled = chat.bind_tools([multiply]).bind(tool_choice="auto")

    agent = create_react_agent(tooled, [multiply])
    prompt = HumanMessage(content="计算 6 和 7 的乘积,然后解释你的步骤。")
    state = agent.invoke({"messages": [prompt]})
    print("\n=== Final ===\n")
    # The final assistant message is the last entry in the graph state.
    print(state["messages"][-1].content)
|
||||
|
||||
|
||||
# Script entry point: run the example when executed directly.
if __name__ == "__main__":
    main()
|
||||
|
||||
65
examples/qwen_langgraph_react.py
Normal file
65
examples/qwen_langgraph_react.py
Normal file
@@ -0,0 +1,65 @@
|
||||
"""
Minimal LangGraph ReAct agent using Qwen3-Coder(-Flash) via OpenAI-compatible API.

Prereqs:
    pip install -U langgraph langchain langchain-openai

Env:
    export QWEN_API_KEY=sk-...
    export QWEN_BASE_URL=https://dashscope-intl.aliyuncs.com/compatible-mode/v1  # or your gateway
    export QWEN_MODEL=qwen3-coder-flash  # use console's actual model name
"""
|
||||
import sys,os
|
||||
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from langgraph.prebuilt import create_react_agent
|
||||
from langchain_core.messages import HumanMessage
|
||||
|
||||
try:
|
||||
from langchain.tools import tool
|
||||
except Exception: # older versions
|
||||
from langchain_core.tools import tool # type: ignore
|
||||
|
||||
from langgraph_qwen import ChatQwenOpenAICompat, bind_qwen_tools
|
||||
|
||||
|
||||
@tool
def get_time(_: str = "") -> str:
    """Get current local time in ISO format."""
    # Local import keeps the example's top-level imports minimal.
    from datetime import datetime
    return datetime.now().isoformat()
|
||||
|
||||
|
||||
@tool
def add(x_and_y: str) -> str:
    """Add two integers given as 'x y'.

    Args:
        x_and_y: Two whitespace-separated integers, e.g. "7 35".

    Returns:
        The sum as a string, or a usage message when the input is not
        exactly two parseable integers.
    """
    try:
        # int() raises ValueError on non-numeric tokens; catching it here
        # keeps a malformed tool call from crashing the whole agent run
        # (and matches the error handling of the sibling multiply tool).
        xs = [int(s) for s in x_and_y.strip().split()]
    except ValueError:
        return "Please provide two integers: 'x y'"
    if len(xs) != 2:
        return "Please provide two integers: 'x y'"
    return str(xs[0] + xs[1])
|
||||
|
||||
|
||||
def main():
    """Run a two-tool ReAct agent (time + addition) against a single prompt."""
    # Prefer the custom adapter to avoid extra deps (langchain_openai).
    chat = ChatQwenOpenAICompat(temperature=0)
    tool_model = bind_qwen_tools(chat, [get_time, add], tool_choice="auto")

    agent = create_react_agent(tool_model, [get_time, add])

    # A prompt designed to trigger more than one tool call.
    prompt = "现在是几点?然后把 7 和 35 相加,并把两者结果合并成一句简洁中文回答。"

    state = agent.invoke({"messages": [HumanMessage(content=prompt)]})

    # The last message in the graph state carries the final answer.
    final = state["messages"][-1]
    print("\n=== Agent Final Answer ===\n")
    print(getattr(final, "content", final))
|
||||
|
||||
|
||||
# Script entry point: run the example when executed directly.
if __name__ == "__main__":
    main()
|
||||
56
examples/qwen_langgraph_stream.py
Normal file
56
examples/qwen_langgraph_stream.py
Normal file
@@ -0,0 +1,56 @@
|
||||
"""
Streaming example with LangGraph + Qwen. Prints incremental model tokens and
tool call events.

Prereqs:
    pip install -U langgraph langchain langchain-openai

Env:
    export QWEN_API_KEY=sk-...
    export QWEN_BASE_URL=https://dashscope-intl.aliyuncs.com/compatible-mode/v1
    export QWEN_MODEL=qwen3-coder-flash
"""
|
||||
|
||||
from langgraph.prebuilt import create_react_agent
|
||||
from langchain_core.messages import HumanMessage
|
||||
|
||||
try:
|
||||
from langchain.tools import tool
|
||||
except Exception: # older versions
|
||||
from langchain_core.tools import tool # type: ignore
|
||||
|
||||
from langgraph_qwen import ChatQwenOpenAICompat, bind_qwen_tools
|
||||
|
||||
|
||||
@tool
def calc(expr: str) -> str:
    """Safely evaluate a simple arithmetic expression.

    Only digits, basic arithmetic operators, parentheses and the
    whitelisted math functions (sqrt, sin, cos) are accepted.

    Returns:
        The result as a string, or "error: ..." on invalid input.
    """
    import math
    import re

    # SECURITY: eval() with an emptied __builtins__ is NOT a real sandbox —
    # attribute chains such as ().__class__ can still reach dangerous
    # objects. Since expr comes from model output, reject any character
    # outside a strict arithmetic alphabet (no underscores, quotes or
    # brackets) before evaluating.
    if not re.fullmatch(r"[0-9a-z+\-*/%(),.\s]*", expr):
        return "error: disallowed characters in expression"
    allowed = {k: getattr(math, k) for k in ["sqrt", "sin", "cos"]}
    try:
        return str(eval(expr, {"__builtins__": {}}, allowed))
    except Exception as e:
        return f"error: {e}"
|
||||
|
||||
|
||||
def main():
    """Stream a Qwen ReAct agent run, printing assistant output as it arrives."""
    chat = ChatQwenOpenAICompat(temperature=0)
    tool_model = bind_qwen_tools(chat, [calc], tool_choice="auto")
    agent = create_react_agent(tool_model, [calc])

    payload = {"messages": [HumanMessage(content="先说一句你好,然后计算 sqrt(144)。")]}

    print("Streaming events (tokens and tool calls):\n")
    for state in agent.stream(payload, stream_mode="values"):
        # Each streamed value is a partial graph state holding the messages so far.
        latest = state["messages"][-1]
        kind = getattr(latest, "type", getattr(latest, "role", ""))
        text = getattr(latest, "content", "")
        # Only echo assistant-side output; skip tool/system messages.
        if kind in ("ai", "assistant"):
            print(text, end="", flush=True)

    print("\n\nDone.")
|
||||
|
||||
|
||||
# Script entry point: run the example when executed directly.
if __name__ == "__main__":
    main()
|
||||
43
examples/qwen_langgraph_sugar.py
Normal file
43
examples/qwen_langgraph_sugar.py
Normal file
@@ -0,0 +1,43 @@
|
||||
"""
Example using the sugar: create_qwen_react_agent(...)

Prereqs:
    uv pip install -e '.[openai,custom]'

Env:
    export QWEN_API_KEY=sk-...
    export QWEN_BASE_URL=https://dashscope-intl.aliyuncs.com/compatible-mode/v1
    export QWEN_MODEL=qwen3-coder-flash
"""
|
||||
|
||||
from langchain_core.messages import HumanMessage
|
||||
|
||||
try:
|
||||
from langchain.tools import tool
|
||||
except Exception:
|
||||
from langchain_core.tools import tool # type: ignore
|
||||
|
||||
from langgraph_qwen import create_qwen_react_agent
|
||||
|
||||
|
||||
@tool
def echo(text: str) -> str:
    """Echo back the given text."""
    # Identity tool: useful for verifying that tool routing works end to end.
    return text
|
||||
|
||||
|
||||
def main():
    """Exercise the create_qwen_react_agent convenience wrapper with one tool."""
    agent = create_qwen_react_agent(
        [echo],
        prefer="custom",  # use our custom BaseChatModel by default
        model_kwargs={"temperature": 0},
    )

    prompt = HumanMessage(content="调用 echo 工具返回:你好,LangGraph!")
    state = agent.invoke({"messages": [prompt]})
    print("\n=== Final ===\n")
    # The final assistant message is the last entry in the graph state.
    print(state["messages"][-1].content)
|
||||
|
||||
|
||||
# Script entry point: run the example when executed directly.
if __name__ == "__main__":
    main()
|
||||
|
||||
Reference in New Issue
Block a user