"""LangChain v1 MCP tools example (ported from LangGraph version).

This script demonstrates how to use LangChain v1 agent syntax with MCP tools
exposed by the GitHub MCP endpoint. It preserves the model selection logic
(Azure OpenAI, GitHub Models, Ollama, or openai.com) from the original
LangGraph-based example.
"""
| 7 | + |
| 8 | +from __future__ import annotations |
| 9 | + |
| 10 | +import asyncio |
| 11 | +import logging |
| 12 | +import os |
| 13 | +from pathlib import Path |
| 14 | + |
| 15 | +import azure.identity |
| 16 | +from dotenv import load_dotenv |
| 17 | +from langchain.agents import create_agent |
| 18 | +from langchain.agents.middleware import AgentMiddleware, AgentState, ModelRequest |
| 19 | +from langchain_core.messages import AIMessage, HumanMessage, ToolMessage |
| 20 | +from langchain_mcp_adapters.client import MultiServerMCPClient |
| 21 | +from langchain_openai import AzureChatOpenAI, ChatOpenAI |
| 22 | +from rich.logging import RichHandler |
| 23 | + |
| 24 | +logging.basicConfig(level=logging.WARNING, format="%(message)s", datefmt="[%X]", handlers=[RichHandler()]) |
| 25 | +logger = logging.getLogger("lang_triage") |
| 26 | + |
| 27 | +load_dotenv(override=True) |
| 28 | +API_HOST = os.getenv("API_HOST", "github") |
| 29 | + |
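# Configure the chat model for the selected host: Azure OpenAI (keyless auth via
# DefaultAzureCredential), GitHub Models, a local Ollama server, or openai.com
# as the default fallback.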
if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(),
        "https://cognitiveservices.azure.com/.default",
    )
    base_model = AzureChatOpenAI(
        azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
        azure_deployment=os.environ.get("AZURE_OPENAI_CHAT_DEPLOYMENT"),
        openai_api_version=os.environ.get("AZURE_OPENAI_VERSION"),
        azure_ad_token_provider=token_provider,
    )
elif API_HOST == "github":
    base_model = ChatOpenAI(
        model=os.getenv("GITHUB_MODEL", "gpt-4o"),
        base_url="https://models.inference.ai.azure.com",
        api_key=os.environ.get("GITHUB_TOKEN"),
    )
elif API_HOST == "ollama":
    base_model = ChatOpenAI(
        model=os.environ.get("OLLAMA_MODEL", "llama3.1"),
        base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"),
        api_key="none",
    )
else:
    base_model = ChatOpenAI(model=os.getenv("OPENAI_MODEL", "gpt-4o-mini"))


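# Middleware that caps the number of tool-calling turns: once the limit is hit,
# tools are stripped from the model request so the model must answer directly.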
class ToolCallLimitMiddleware(AgentMiddleware):
    def __init__(self, limit: int) -> None:
        super().__init__()
        self.limit = limit

    def modify_model_request(self, request: ModelRequest, state: AgentState) -> ModelRequest:
        # Count assistant turns that requested at least one tool call.
        tool_call_count = sum(1 for msg in state["messages"] if isinstance(msg, AIMessage) and msg.tool_calls)
        if tool_call_count >= self.limit:
            logger.info("Tool call limit of %d reached, disabling further tool calls.", self.limit)
            request.tools = []
        return request


async def main():
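    # Connect to the hosted GitHub MCP server over streamable HTTP, reusing the
    # GITHUB_TOKEN from the environment for authentication.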
    client = MultiServerMCPClient(
        {
            "github": {
                "url": "https://api.githubcopilot.com/mcp/",
                "transport": "streamable_http",
                "headers": {"Authorization": f"Bearer {os.getenv('GITHUB_TOKEN', '')}"},
            }
        }
    )

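    # Fetch every tool exposed by the MCP server, then keep only the read-only
    # search/list tools needed for issue triage.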
    tools = await client.get_tools()
    tools = [t for t in tools if t.name in ("list_issues", "search_code", "search_issues", "search_pull_requests")]
    agent = create_agent(base_model, tools, middleware=[ToolCallLimitMiddleware(limit=5)])

    stale_prompt_path = Path(__file__).parent / "staleprompt.md"
    with stale_prompt_path.open("r", encoding="utf-8") as f:
        stale_prompt = f.read()

    user_content = stale_prompt + " Find one issue from Azure-samples azure-search-openai-demo that can be closed."

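    # stream_mode="updates" yields one dict per agent step, keyed by the node
    # that produced it; log tool calls, truncated tool results, and the answer.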
    async for step in agent.astream({"messages": [HumanMessage(content=user_content)]}, stream_mode="updates"):
        for step_data in step.values():
            last_message = step_data["messages"][-1]
            if isinstance(last_message, AIMessage) and last_message.tool_calls:
                tool_name = last_message.tool_calls[0]["name"]
                tool_args = last_message.tool_calls[0]["args"]
                logger.info(f"Calling tool '{tool_name}' with args: {tool_args}")
            elif isinstance(last_message, ToolMessage):
                logger.info(f"Got tool result: {last_message.content[:200]}...")
            else:
                logger.info(f"Response: {last_message.content}")


if __name__ == "__main__":
    logger.setLevel(logging.INFO)
    asyncio.run(main())