
Commit 9380f86

Merge pull request #26 from Azure-Samples/langchainupdates
Update langchain examples
2 parents d9ce4be + 142dce7

10 files changed: +244 −168 lines

README.md

Lines changed: 3 additions & 3 deletions
@@ -105,10 +105,10 @@ You can run the examples in this repository by executing the scripts in the `examples` directory:
 | [langchainv1_tools.py](examples/langchainv1_tools.py) | Uses LangChain v1 to build a weekend planning agent with multiple tools. |
 | [langchainv1_supervisor.py](examples/langchainv1_supervisor.py) | Uses LangChain v1 with a supervisor orchestrating activity and recipe sub-agents. |
 | [langchainv1_quickstart.py](examples/langchainv1_quickstart.py) | Uses LangChain v1 to build an assistant with tool calling, structured output, and memory. Based off official Quickstart docs. |
+| [langchainv1_mcp_github.py](examples/langchainv1_mcp_github.py) | Uses a LangChain v1 agent with the GitHub MCP server to triage repository issues. |
+| [langchainv1_mcp_http.py](examples/langchainv1_mcp_http.py) | Uses a LangChain v1 agent with tools from a local MCP HTTP server. |
 | [langgraph_agent.py](examples/langgraph_agent.py) | Builds LangGraph graph for an agent to play songs. |
-| [langgraph_mcp_http.py](examples/langgraph_mcp_http.py) | Uses LangGraph with ReAct agent that uses tools from local MCP HTTP server. |
-| [langgraph_mcp_http_graph.py](examples/langgraph_mcp_http_graph.py) | Builds a custom LangGraph state graph using tools from local MCP HTTP server. |
-| [langgraph_mcp_github.py](examples/langgraph_mcp_github.py) | Uses a LangGraph agent with the GitHub MCP server to triage repository issues. |
+| [langgraph_mcp.py](examples/langgraph_mcp.py) | Builds a LangGraph graph that uses tools from an MCP HTTP server. |
 
 ### OpenAI and OpenAI-Agents

examples/langchainv1_mcp_github.py

Lines changed: 106 additions & 0 deletions
@@ -0,0 +1,106 @@
"""LangChain v1 MCP tools example (ported from the LangGraph version).

This script demonstrates how to use LangChain v1 agent syntax with MCP tools
exposed by the GitHub MCP endpoint. It preserves the Azure OpenAI vs. GitHub
model selection logic from the original LangGraph-based example.
"""

from __future__ import annotations

import asyncio
import logging
import os
from pathlib import Path

import azure.identity
from dotenv import load_dotenv
from langchain.agents import create_agent
from langchain.agents.middleware import AgentMiddleware, AgentState, ModelRequest
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from rich.logging import RichHandler

logging.basicConfig(level=logging.WARNING, format="%(message)s", datefmt="[%X]", handlers=[RichHandler()])
logger = logging.getLogger("lang_triage")

load_dotenv(override=True)
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(),
        "https://cognitiveservices.azure.com/.default",
    )
    base_model = AzureChatOpenAI(
        azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
        azure_deployment=os.environ.get("AZURE_OPENAI_CHAT_DEPLOYMENT"),
        openai_api_version=os.environ.get("AZURE_OPENAI_VERSION"),
        azure_ad_token_provider=token_provider,
    )
elif API_HOST == "github":
    base_model = ChatOpenAI(
        model=os.getenv("GITHUB_MODEL", "gpt-4o"),
        base_url="https://models.inference.ai.azure.com",
        api_key=os.environ.get("GITHUB_TOKEN"),
    )
elif API_HOST == "ollama":
    base_model = ChatOpenAI(
        model=os.environ.get("OLLAMA_MODEL", "llama3.1"),
        base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"),
        api_key="none",
    )
else:
    base_model = ChatOpenAI(model=os.getenv("OPENAI_MODEL", "gpt-4o-mini"))


class ToolCallLimitMiddleware(AgentMiddleware):
    """Disable further tool calls once the agent has made `limit` tool-calling turns."""

    def __init__(self, limit: int) -> None:
        super().__init__()
        self.limit = limit

    def modify_model_request(self, request: ModelRequest, state: AgentState) -> ModelRequest:
        # Count prior assistant turns that issued tool calls.
        tool_call_count = sum(1 for msg in state["messages"] if isinstance(msg, AIMessage) and msg.tool_calls)
        if tool_call_count >= self.limit:
            logger.info("Tool call limit of %d reached, disabling further tool calls.", self.limit)
            request.tools = []
        return request


async def main():
    client = MultiServerMCPClient(
        {
            "github": {
                "url": "https://api.githubcopilot.com/mcp/",
                "transport": "streamable_http",
                "headers": {"Authorization": f"Bearer {os.getenv('GITHUB_TOKEN', '')}"},
            }
        }
    )

    tools = await client.get_tools()
    tools = [t for t in tools if t.name in ("list_issues", "search_code", "search_issues", "search_pull_requests")]
    agent = create_agent(base_model, tools, middleware=[ToolCallLimitMiddleware(limit=5)])

    stale_prompt_path = Path(__file__).parent / "staleprompt.md"
    with stale_prompt_path.open("r", encoding="utf-8") as f:
        stale_prompt = f.read()

    user_content = stale_prompt + " Find one issue from Azure-samples azure-search-openai-demo that can be closed."

    async for step in agent.astream({"messages": [HumanMessage(content=user_content)]}, stream_mode="updates"):
        for step_name, step_data in step.items():
            last_message = step_data["messages"][-1]
            if isinstance(last_message, AIMessage) and last_message.tool_calls:
                tool_name = last_message.tool_calls[0]["name"]
                tool_args = last_message.tool_calls[0]["args"]
                logger.info(f"Calling tool '{tool_name}' with args: {tool_args}")
            elif isinstance(last_message, ToolMessage):
                logger.info(f"Got tool result: {last_message.content[0:200]}...")
            else:
                logger.info(f"Response: {last_message.content}")


if __name__ == "__main__":
    logger.setLevel(logging.INFO)
    asyncio.run(main())

examples/langchainv1_mcp_http.py

Lines changed: 81 additions & 0 deletions
@@ -0,0 +1,81 @@
"""LangChain v1 agent + MCP HTTP itinerary server example.

Prerequisite:
Start the local MCP server defined in `mcp_server_basic.py` on port 8000:

    python examples/mcp_server_basic.py
"""

import asyncio
import logging
import os

import azure.identity
from dotenv import load_dotenv
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from rich.logging import RichHandler

logging.basicConfig(level=logging.WARNING, format="%(message)s", datefmt="[%X]", handlers=[RichHandler()])
logger = logging.getLogger("lang_itinerary")

load_dotenv(override=True)
API_HOST = os.getenv("API_HOST", "github")

if API_HOST == "azure":
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )
    base_model = AzureChatOpenAI(
        azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
        azure_deployment=os.environ.get("AZURE_OPENAI_CHAT_DEPLOYMENT"),
        openai_api_version=os.environ.get("AZURE_OPENAI_VERSION"),
        azure_ad_token_provider=token_provider,
    )
elif API_HOST == "github":
    base_model = ChatOpenAI(
        model=os.getenv("GITHUB_MODEL", "gpt-4o"),
        base_url="https://models.inference.ai.azure.com",
        api_key=os.environ.get("GITHUB_TOKEN"),
    )
elif API_HOST == "ollama":
    base_model = ChatOpenAI(
        model=os.environ.get("OLLAMA_MODEL", "llama3.1"),
        base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"),
        api_key="none",
    )
else:
    base_model = ChatOpenAI(model=os.getenv("OPENAI_MODEL", "gpt-4o-mini"))


async def run_agent():
    client = MultiServerMCPClient(
        {
            "itinerary": {
                # Make sure you start your itinerary server on port 8000
                "url": "http://localhost:8000/mcp/",
                "transport": "streamable_http",
            }
        }
    )

    tools = await client.get_tools()
    agent = create_agent(base_model, tools)

    user_query = (
        "Find me a hotel in San Francisco for 2 nights starting from 2026-01-01. "
        "I need a hotel with free WiFi and a pool."
    )

    response = await agent.ainvoke({"messages": [HumanMessage(content=user_query)]})
    final = response["messages"][-1].content
    print(final)


def main():
    asyncio.run(run_agent())


if __name__ == "__main__":
    logger.setLevel(logging.INFO)
    main()
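
The script above assumes an MCP server is already listening on port 8000. That server (`mcp_server_basic.py`) is not part of this diff; below is a minimal sketch of what it might look like, assuming the FastMCP helper from the official `mcp` Python SDK. The `find_hotels` tool and its schema are hypothetical stand-ins; the real server's tools may differ.

from mcp.server.fastmcp import FastMCP

# Hypothetical itinerary server for illustration only.
mcp = FastMCP("itinerary")


@mcp.tool()
def find_hotels(city: str, checkin_date: str, num_nights: int) -> list[dict]:
    """Return hotels for the given city and dates (canned data for the sketch)."""
    return [{"name": "Hotel Example", "city": city, "wifi": True, "pool": True}]


if __name__ == "__main__":
    # The streamable-http transport serves on http://localhost:8000/mcp by default.
    mcp.run(transport="streamable-http")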

examples/langchainv1_supervisor.py

Lines changed: 7 additions & 7 deletions
@@ -94,9 +94,9 @@ def get_current_date() -> str:
 
 
 @tool
-def weekend_agent_tool(query: str) -> str:
-    """Invoke the activity planning agent and return its final response as plain text."""
-    logger.info("Tool:weekend_agent invoked")
+def plan_weekend(query: str) -> str:
+    """Plan a weekend based on user query and return the final response."""
+    logger.info("Tool: plan_weekend invoked")
     response = weekend_agent.invoke({"messages": [HumanMessage(content=query)]})
     final = response["messages"][-1].content
     return final
@@ -159,9 +159,9 @@ def check_fridge() -> list[str]:
 
 
 @tool
-def meal_agent_tool(query: str) -> str:
-    """Invoke the recipe planning agent and return its final response as plain text."""
-    logger.info("Tool:meal_agent invoked")
+def plan_meal(query: str) -> str:
+    """Plan a meal based on user query and return the final response."""
+    logger.info("Tool: plan_meal invoked")
     response = meal_agent.invoke({"messages": [HumanMessage(content=query)]})
     final = response["messages"][-1].content
     return final
@@ -176,7 +176,7 @@ def meal_agent_tool(query: str) -> str:
         "You are a supervisor, managing an activity planning agent and recipe planning agent."
         "Assign work to them as needed in order to answer user's question."
     ),
-    tools=[weekend_agent_tool, meal_agent_tool],
+    tools=[plan_weekend, plan_meal],
 )
 
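The rename above is cosmetic, but the underlying pattern is worth spelling out: each sub-agent is wrapped in a plain `@tool` function, so the supervisor can delegate to it like any other tool. A condensed, self-contained sketch of that pattern follows; the model choice, the `get_current_date` body, and the `system_prompt` keyword are assumptions based on how `create_agent` is used elsewhere in this PR, not an exact copy of the file.

import datetime

from langchain.agents import create_agent
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

base_model = ChatOpenAI(model="gpt-4o-mini")  # any chat model works here


@tool
def get_current_date() -> str:
    """Return today's date as an ISO string (hypothetical body for the sketch)."""
    return datetime.date.today().isoformat()


# Sub-agent, built the same way as the supervisor itself.
weekend_agent = create_agent(base_model, tools=[get_current_date])


@tool
def plan_weekend(query: str) -> str:
    """Plan a weekend based on the user query and return the final response."""
    response = weekend_agent.invoke({"messages": [HumanMessage(content=query)]})
    return response["messages"][-1].content


# The supervisor sees the sub-agent only as a tool it can call.
supervisor = create_agent(
    base_model,
    system_prompt="You are a supervisor, managing an activity planning agent.",
    tools=[plan_weekend],
)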
examples/langgraph_agent.py

Lines changed: 27 additions & 18 deletions
@@ -10,6 +10,33 @@
 from langgraph.graph import END, START, MessagesState, StateGraph
 from langgraph.prebuilt import ToolNode
 
+# Setup the client to use either Azure OpenAI or GitHub Models
+load_dotenv(override=True)
+API_HOST = os.getenv("API_HOST", "github")
+
+if API_HOST == "azure":
+    token_provider = azure.identity.get_bearer_token_provider(
+        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+    )
+    model = AzureChatOpenAI(
+        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
+        azure_deployment=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
+        openai_api_version=os.environ["AZURE_OPENAI_VERSION"],
+        azure_ad_token_provider=token_provider,
+    )
+elif API_HOST == "github":
+    model = ChatOpenAI(
+        model=os.getenv("GITHUB_MODEL", "gpt-4o"),
+        base_url="https://models.inference.ai.azure.com",
+        api_key=os.environ["GITHUB_TOKEN"],
+    )
+elif API_HOST == "ollama":
+    model = ChatOpenAI(
+        model=os.environ["OLLAMA_MODEL"],
+        base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"),
+        api_key="none",
+    )
+
 
 @tool
 def play_song_on_spotify(song: str):
@@ -27,24 +54,6 @@ def play_song_on_apple(song: str):
 
 tools = [play_song_on_apple, play_song_on_spotify]
 tool_node = ToolNode(tools)
-
-# Setup the client to use either Azure OpenAI or GitHub Models
-load_dotenv(override=True)
-API_HOST = os.getenv("API_HOST", "github")
-
-if API_HOST == "azure":
-    token_provider = azure.identity.get_bearer_token_provider(azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
-    model = AzureChatOpenAI(
-        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
-        azure_deployment=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
-        openai_api_version=os.environ["AZURE_OPENAI_VERSION"],
-        azure_ad_token_provider=token_provider,
-    )
-elif API_HOST == "github":
-    model = ChatOpenAI(model=os.getenv("GITHUB_MODEL", "gpt-4o"), base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
-elif API_HOST == "ollama":
-    model = ChatOpenAI(model=os.environ["OLLAMA_MODEL"], base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
-
 
 model = model.bind_tools(tools, parallel_tool_calls=False)
 
examples/langgraph_mcp_http_graph.py renamed to examples/langgraph_mcp.py

Lines changed: 11 additions & 3 deletions
@@ -19,15 +19,21 @@
 API_HOST = os.getenv("API_HOST", "github")
 
 if API_HOST == "azure":
-    token_provider = azure.identity.get_bearer_token_provider(azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
+    token_provider = azure.identity.get_bearer_token_provider(
+        azure.identity.DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+    )
     model = AzureChatOpenAI(
         azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
         azure_deployment=os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
         openai_api_version=os.environ["AZURE_OPENAI_VERSION"],
         azure_ad_token_provider=token_provider,
     )
 else:
-    model = ChatOpenAI(model=os.getenv("GITHUB_MODEL", "gpt-4o"), base_url="https://models.inference.ai.azure.com", api_key=os.environ["GITHUB_TOKEN"])
+    model = ChatOpenAI(
+        model=os.getenv("GITHUB_MODEL", "gpt-4o"),
+        base_url="https://models.inference.ai.azure.com",
+        api_key=os.environ["GITHUB_TOKEN"],
+    )
 
 
 async def setup_agent():
@@ -56,7 +62,9 @@ def call_model(state: MessagesState):
     )
     builder.add_edge("tools", "call_model")
     graph = builder.compile()
-    hotel_response = await graph.ainvoke({"messages": "Find me a hotel in San Francisco for 2 nights starting from 2024-01-01. I need a hotel with free WiFi and a pool."})
+    hotel_response = await graph.ainvoke(
+        {"messages": "Find a hotel in SF for 2 nights starting from 2024-01-01. I need free WiFi and pool."}
+    )
     print(hotel_response["messages"][-1].content)
     image_bytes = graph.get_graph().draw_mermaid_png()
     with open("examples/images/langgraph_mcp_http_graph.png", "wb") as f:
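
The hunk above only shows the tail of the graph construction in the renamed file. For context, here is a condensed sketch of the typical wiring, assuming the same StateGraph/ToolNode APIs imported in langgraph_agent.py; the `should_continue` router and the `model`/`tools` bindings are stand-ins for the elided parts of the file, not its exact code.

from langgraph.graph import END, START, MessagesState, StateGraph
from langgraph.prebuilt import ToolNode


def call_model(state: MessagesState):
    # `model` is assumed to be bound to the MCP tools, as in the diff above.
    return {"messages": [model.invoke(state["messages"])]}


def should_continue(state: MessagesState) -> str:
    # Route to the tool node while the last AI message requests tool calls.
    last = state["messages"][-1]
    return "tools" if getattr(last, "tool_calls", None) else END


builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_node("tools", ToolNode(tools))  # `tools` come from the MCP client
builder.add_edge(START, "call_model")
builder.add_conditional_edges("call_model", should_continue)
builder.add_edge("tools", "call_model")
graph = builder.compile()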
