Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
MODEL_NAME=gemini-2.5-flash
GOOGLE_API_KEY=******************
AGENT_BASE_URL=http://localhost:8000/a2a
56 changes: 56 additions & 0 deletions samples/python/agents/a2a-multiple-agents-on-single-host/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
# a2a-multiple-agents-on-single-host

This repository demonstrates how to run **multiple A2A agents** on the **same host** using the A2A protocol.
Each agent is served at a **unique URL path**, making it possible to host different agents without requiring multiple servers or ports.

---

## 📌 Example Setup

Three agents running on the same host:

| Agent Name | Agent card URL |
|-----------------------|-------------------------------------------------------------------------------------------------------------------|
| Conversational Agent | [http://localhost:8000/a2a/conversation/agent-card.json](http://localhost:8000/a2a/conversation/agent-card.json) |
| Trending Topics Agent | [http://localhost:8000/a2a/trending/agent-card.json](http://localhost:8000/a2a/trending/agent-card.json)         |
| Analyzer Agent | [http://localhost:8000/a2a/analyzer/agent-card.json](http://localhost:8000/a2a/analyzer/agent-card.json) |


---

## 🚀 Running Agents Locally

1. Navigate to the sample code directory
```bash
cd samples/python/agents/a2a-multiple-agents-on-single-host
```

2. Install dependencies (using uv)
```bash
uv venv
source .venv/bin/activate
uv sync
```

3. Set environment variables
```bash
cp .env-sample .env
```

Update values as needed.

4. Start the agents
```bash
uv run main.py
```

---

### Testing using the CLI

```shell
cd samples/python/hosts/cli
uv run . --agent http://localhost:8000/a2a/conversation/
```

---
52 changes: 52 additions & 0 deletions samples/python/agents/a2a-multiple-agents-on-single-host/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import logging
import os

import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI

from src.agent.agent_utils import setup_agent
from src.agent.analyzer_agent import get_analyzer_agent, get_analyzer_agent_card
from src.agent.conversation_agent import get_conversational_agent_card, get_conversational_agent
from src.agent.trending_topics_agent import get_trending_topics_agent, get_trending_topics_agent_card

load_dotenv()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()

AGENT_BASE_URL = os.getenv('AGENT_BASE_URL')

if not AGENT_BASE_URL:
raise ValueError("AGENT_BASE_URL environment variable must be set")

MODEL_NAME = os.getenv('MODEL_NAME')

if not MODEL_NAME:
raise ValueError("MODEL_NAME environment variable must be set")

logger.info(f"AGENT BASE URL {AGENT_BASE_URL} and MODEL NAME {MODEL_NAME} are set")

app: FastAPI = FastAPI(title="Run multiple agents on single host using A2A protocol.",
description="Run multiple agents on single host using A2A protocol.",
version="1.0.0",
root_path="/a2a")


@app.get("/health")
async def health_check() -> dict[str, str]:
return {"status": "ok"}


setup_agent(name="conversation", get_agent=get_conversational_agent, get_agent_card=get_conversational_agent_card,
model_name=MODEL_NAME,
agent_base_url=AGENT_BASE_URL, app=app)
setup_agent(name="trending_topics", get_agent=get_trending_topics_agent, get_agent_card=get_trending_topics_agent_card,
model_name=MODEL_NAME,
agent_base_url=AGENT_BASE_URL, app=app)
setup_agent(name="analyzer", get_agent=get_analyzer_agent, get_agent_card=get_analyzer_agent_card,
model_name=MODEL_NAME,
agent_base_url=AGENT_BASE_URL, app=app)

if __name__ == '__main__':
uvicorn.run(app, host="0.0.0.0", port=8000)
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
[project]
name = "a2a-multiple-agents-on-single-host"
version = "0.1.0"
description = "Run multiple agents on single host using A2A protocol."
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
"a2a-sdk>=0.3.1",
"google-adk>=1.11.0",
]
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
from collections.abc import Callable
from typing import Any

from a2a.server.apps.jsonrpc.jsonrpc_app import JSONRPCApplication, CallContextBuilder
from a2a.server.context import ServerCallContext
from a2a.server.request_handlers import DefaultRequestHandler
from a2a.server.request_handlers.request_handler import RequestHandler
from a2a.server.tasks import InMemoryTaskStore
from a2a.types import (
AgentCard,
)
from a2a.utils.constants import (
AGENT_CARD_WELL_KNOWN_PATH,
DEFAULT_RPC_URL,
EXTENDED_AGENT_CARD_PATH,
)
from fastapi import FastAPI, APIRouter
from google.adk import Runner
from google.adk.a2a.executor.a2a_agent_executor import A2aAgentExecutorConfig, A2aAgentExecutor
from google.adk.agents import LlmAgent
from google.adk.artifacts import InMemoryArtifactService
from google.adk.memory import InMemoryMemoryService
from google.adk.sessions import InMemorySessionService
from starlette.applications import Starlette


def get_agent_request_handler(agent: LlmAgent):
runner = Runner(
app_name=agent.name,
agent=agent,
artifact_service=InMemoryArtifactService(),
session_service=InMemorySessionService(),
memory_service=InMemoryMemoryService()
)
config = A2aAgentExecutorConfig()
executor = A2aAgentExecutor(runner=runner, config=config)
return DefaultRequestHandler(agent_executor=executor, task_store=InMemoryTaskStore())


class A2AFastApiApp(JSONRPCApplication):
def __init__(self,
fastapi_app: FastAPI,
agent_card: AgentCard,
http_handler: RequestHandler,
extended_agent_card: AgentCard | None = None,
context_builder: CallContextBuilder | None = None,
card_modifier: Callable[[AgentCard], AgentCard] | None = None,
extended_card_modifier: Callable[[AgentCard, ServerCallContext], AgentCard] | None = None):
super().__init__(
agent_card=agent_card,
http_handler=http_handler,
extended_agent_card=extended_agent_card,
context_builder=context_builder,
card_modifier=card_modifier,
extended_card_modifier=extended_card_modifier
)
self.fastapi_app = fastapi_app

def build(self, agent_card_url: str = AGENT_CARD_WELL_KNOWN_PATH, rpc_url: str = DEFAULT_RPC_URL,
extended_agent_card_url: str = EXTENDED_AGENT_CARD_PATH, **kwargs: Any, ) -> Starlette:
name_prefix = rpc_url.replace("/", "")
router = APIRouter()
router.add_api_route(rpc_url, endpoint=self._handle_requests, name=f'{name_prefix}_a2a_handler',
methods=['POST'])
router.add_api_route(agent_card_url , endpoint=self._handle_get_agent_card, methods=['GET'],
name=f'{name_prefix}_agent_card')
self.fastapi_app.include_router(router)
return self.fastapi_app
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
from typing import Callable

from a2a.types import AgentCard
from fastapi import FastAPI
from google.adk.agents import LlmAgent

from src.a2a.a2a_fastapi_app import get_agent_request_handler, A2AFastApiApp


def setup_agent(name: str,
get_agent: Callable[[str], LlmAgent],
get_agent_card: Callable[[str], AgentCard],
model_name: str,
agent_base_url: str,
app: FastAPI
) -> None:
agent = get_agent(model_name)
agent_request_handler = get_agent_request_handler(agent)
agent_card = get_agent_card(f"{agent_base_url}/{name}/")
agent_server = A2AFastApiApp(fastapi_app=app, agent_card=agent_card,
http_handler=agent_request_handler)
agent_server.build(rpc_url=f"/{name}/", agent_card_url=f"/{name}/{{path:path}}")
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
from a2a.types import AgentCard, AgentCapabilities, TransportProtocol, AgentSkill
from google.adk.agents import LlmAgent
from google.adk.tools import google_search


def get_analyzer_agent(model: str) -> LlmAgent:
return LlmAgent(
model=model,
name='trend_analyzer_agent',
instruction="""
You are a data analyst specializing in trend analysis. When given a trending topic,
perform deep research to find quantitative data and insights.

For each trend you analyze:
1. Search for statistics, numbers, and metrics related to the trend
2. Look for:
- Engagement metrics (views, shares, mentions)
- Growth rates and timeline
- Geographic distribution
- Related hashtags or keywords
3. Provide concrete numbers and data points

Keep it somehow concise

Always prioritize quantitative information over qualitative descriptions.
""",
tools=[google_search],
)


def get_analyzer_agent_card(agent_url: str) -> AgentCard:
return AgentCard(
name='Trend Analyzer Agent',
url=agent_url,
description='Performs deep analysis of trends with quantitative data',
version='1.0',
capabilities=AgentCapabilities(streaming=True),
default_input_modes=['text/plain'],
default_output_modes=['text/plain'],
preferred_transport=TransportProtocol.jsonrpc,
skills=[
AgentSkill(
id='analyze_trend',
name='Analyze Trend',
description='Provides quantitative analysis of a specific trend',
tags=['analysis', 'data', 'metrics', 'statistics'],
examples=[
'Analyze the #ClimateChange trend',
'Get metrics for the Taylor Swift trend',
'Provide data analysis for AI adoption trend',
],
)
],
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
from a2a.types import AgentCard, AgentCapabilities, TransportProtocol, AgentSkill
from google.adk.agents import LlmAgent
from google.adk.tools import google_search

CONVERSATION_AGENT_INSTRUCTIONS = """
You are a Conversation Agent Enhanced with Web Search Capabilities.

## Core Behavior:
- Be conversational, friendly, and helpful
- Provide accurate, relevant, and well-structured responses
- Maintain context throughout the conversation
- Ask clarifying questions when user intent is unclear
- Admit when you don't know something and offer to search

## When to Use Web Search:
1. Current events or time-sensitive info
2. Precise, up-to-date facts
3. Latest technical details
4. Local information
5. Verification of uncertain info
6. Specialized topics needing expert sources

## Search Strategy:
- Use specific queries and authoritative sources
- Cross-reference results
- Distinguish between your knowledge and searched info
- Attribute sources when relevant

## Response Guidelines:
1. Direct answers first
2. Break down complex topics
3. Provide examples
4. Offer multiple perspectives
5. Suggest follow-ups

## Information Quality:
- Prioritize accuracy
- State confidence levels
- Warn about outdated info
- Suggest multiple sources for key decisions
- Fact-check critical points

## Conversation Management:
- Retain and build upon previous context
- Transition topics smoothly
- Match tone to user style
- Respect preferences

## Limitations and Transparency:
- Be honest about capabilities
- Explain when search might help
- Acknowledge incomplete info
- Suggest alternative resources
- Respect privacy

## Best Practices:
- Stay respectful and professional
- Avoid bias
- Use proactive search
- Structure answers clearly
- End with an offer to assist further
"""


def get_conversational_agent(model: str) -> LlmAgent:
return LlmAgent(
model=model,
name="conversational_agent",
description="An AI assistant that enhances conversations with live web search when needed.",
instruction=CONVERSATION_AGENT_INSTRUCTIONS,
tools=[google_search],
)


def get_conversational_agent_card(agent_url: str) -> AgentCard:
return AgentCard(
name="Conversational Agent",
description="Smart Conversational Agent Enhanced with Web Search Capabilities",
url=agent_url,
version="1.0",
capabilities=AgentCapabilities(streaming=True),
default_input_modes=["text/plain"],
default_output_modes=["text/plain"],
preferred_transport=TransportProtocol.jsonrpc,
skills=[
AgentSkill(
id="conversational_agent",
name="Conversational Agent",
description="A Smart Conversational Agent Enhanced with Web Search Capabilities",
tags=["SmartAssistant", "LiveSearch", "AIPowered", "Conversation"],
examples=[
"Find the latest market share statistics for electric vehicles.",
"Why is Trump's tariff a problem for India?",
"What are people talking about on social media?",
],
)
],
)
Loading