# forked from lingyuzeng/agent
"""
|
||
Full example: Start a FastMCP-style HTTP MCP server, then use
|
||
langchain-mcp-adapters to inject MCP tools into a LangGraph + Qwen agent.
|
||
|
||
Steps:
|
||
1) Start the local HTTP MCP server (fallback minimal):
|
||
- uv pip install fastapi uvicorn
|
||
- uvicorn examples.mcp_adapters.fastmcp_server:http_app --host 127.0.0.1 --port 8010
|
||
|
||
The MCP endpoint is available at: http://127.0.0.1:8010/mcp/
|
||
|
||
2) Install mcp-adapters:
|
||
- uv pip install -e '.[mcp-adapters]'
|
||
|
||
3) Configure adapter entry + config (choose one):
|
||
A) Explicit entry (recommended):
|
||
export MCP_ADAPTER_ENTRY='langchain_mcp_adapters:create_tools'
|
||
export MCP_CONFIG_JSON='{"servers":{"local":{"url":"http://127.0.0.1:8010/mcp/","transport":"streamable_http"}}}'
|
||
|
||
B) If your adapter provides a different function:
|
||
export MCP_ADAPTER_ENTRY='your_module:your_entry'
|
||
export MCP_CONFIG_JSON='{}'
|
||
|
||
4) Qwen env (or .env auto-loaded):
|
||
- QWEN_API_KEY, QWEN_BASE_URL, QWEN_MODEL, ...
|
||
|
||
5) Run this example:
|
||
- python examples/mcp_adapters/inject_to_langgraph.py
|
||
"""
|
||
|
||
import asyncio
import importlib
import json
import os
from typing import Any, Dict, List

from langchain_core.messages import HumanMessage
from langgraph.prebuilt import create_react_agent

from langgraph_qwen.chat_model import ChatQwenOpenAICompat


def _env(name: str, default: str = "") -> str:
|
||
v = os.getenv(name)
|
||
return v if v else default
|
||
|
||
async def _load_tools_via_client() -> List[Any]:
|
||
try:
|
||
from langchain_mcp_adapters.client import MultiServerMCPClient # type: ignore
|
||
except Exception as e:
|
||
raise RuntimeError("Please install langchain-mcp-adapters: uv pip install -e '.[mcp-adapters]'") from e
|
||
|
||
weather_url = _env("WEATHER_MCP_URL", "http://localhost:8000/mcp")
|
||
weather_transport = _env("WEATHER_TRANSPORT", "streamable_http")
|
||
|
||
client = MultiServerMCPClient(
|
||
{
|
||
"weather": {
|
||
"url": weather_url,
|
||
"transport": weather_transport,
|
||
}
|
||
}
|
||
)
|
||
tools = await client.get_tools()
|
||
# Best-effort cleanup if client exposes a close method
|
||
try:
|
||
if hasattr(client, "close") and callable(getattr(client, "close")):
|
||
await client.close() # type: ignore
|
||
elif hasattr(client, "close_all_sessions") and callable(getattr(client, "close_all_sessions")):
|
||
await client.close_all_sessions() # type: ignore
|
||
except Exception:
|
||
pass
|
||
return tools
|
||
|
||
async def main():
    """Wire MCP-discovered tools into a LangGraph ReAct agent and run one demo turn."""
    discovered = await _load_tools_via_client()
    print("Discovered tools:")
    for tool in discovered:
        print(" -", getattr(tool, "name", "<noname>"))

    # Alternatively: ChatQwenOpenAICompat(temperature=0).bind_tools(discovered).bind(tool_choice="auto")
    base_model = ChatQwenOpenAICompat(temperature=0)
    model = base_model.bind(tool_choice="auto")
    agent = create_react_agent(model, discovered)

    prompt = "请先列出可用工具名,然后选择一个合理的工具做一次演示调用,并用简洁中文总结结果。"
    result = await agent.ainvoke({"messages": [HumanMessage(content=prompt)]})
    print("=== Final ===")
    print(result["messages"][-1].content)
# Script entry point: run the async demo to completion on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())