2 changes: 2 additions & 0 deletions .gitignore
@@ -197,6 +197,8 @@ cookies.txt
registry/server_state.json
registry/nginx_mcp_revproxy.conf
logs/
token_refresher.pid
token_refresher.log

# Secrets and API keys - never commit these!
.keys.yml
4 changes: 3 additions & 1 deletion README.md
@@ -335,6 +335,7 @@ Transform how both autonomous AI agents and development teams access enterprise
- **Amazon Bedrock AgentCore Integration** - Direct access to AWS services through managed MCP endpoints
- **Three-Legged OAuth (3LO) Support** - External service integration (Atlassian, Google, GitHub)
- **JWT Token Vending Service** - Self-service token generation for automation
- **Automated Token Refresh Service** - Background token refresh to maintain continuous authentication
- **Modern React Frontend** - Complete UI overhaul with TypeScript and real-time updates
- **Dynamic Tool Discovery** - AI agents autonomously find and execute specialized tools
- **Fine-Grained Access Control** - Granular permissions for servers, methods, and individual tools
@@ -347,7 +348,8 @@ Transform how both autonomous AI agents and development teams access enterprise
|------------------|-------------------|------------------------|
| [Installation Guide](docs/installation.md)<br/>Complete setup instructions for EC2 and EKS | [Authentication Guide](docs/auth.md)<br/>OAuth and identity provider integration | [AI Coding Assistants Setup](docs/ai-coding-assistants-setup.md)<br/>VS Code, Cursor, Claude Code integration |
| [Quick Start Tutorial](docs/quick-start.md)<br/>Get running in 5 minutes | [Amazon Cognito Setup](docs/cognito.md)<br/>Step-by-step IdP configuration | [API Reference](docs/registry_api.md)<br/>Programmatic registry management |
| [Configuration Reference](docs/configuration.md)<br/>Environment variables and settings | [Fine-Grained Access Control](docs/scopes.md)<br/>Permission management and security | [Dynamic Tool Discovery](docs/dynamic-tool-discovery.md)<br/>Autonomous agent capabilities |
| [Configuration Reference](docs/configuration.md)<br/>Environment variables and settings | [Fine-Grained Access Control](docs/scopes.md)<br/>Permission management and security | [Token Refresh Service](docs/token-refresh-service.md)<br/>Automated token refresh and lifecycle management |
| | | [Dynamic Tool Discovery](docs/dynamic-tool-discovery.md)<br/>Autonomous agent capabilities |
| | | [Production Deployment](docs/installation.md)<br/>Complete setup for production environments |
| | | [Troubleshooting Guide](docs/FAQ.md)<br/>Common issues and solutions |
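
The Automated Token Refresh Service added above keeps tokens fresh in the background; docs/token-refresh-service.md is the authoritative reference. As a rough orientation only, the pattern usually amounts to re-checking a saved token's `expires_at` and re-running the OAuth helper before it lapses. The sketch below assumes a token file location, refresh margin, and poll interval — none of these values are taken from the repository:

```python
"""Minimal sketch of a background token refresher (illustrative only).

Assumes tokens are stored as JSON with an ``expires_at`` epoch field and that
re-running ``credentials-provider/oauth/ingress_oauth.py`` refreshes them; the
path, margin, and interval below are assumptions, not the project's values.
"""
import json
import subprocess
import time
from pathlib import Path

TOKEN_FILE = Path(".oauth-tokens/ingress.json")  # assumed location
REFRESH_MARGIN = 300                             # refresh 5 minutes before expiry (assumed)
CHECK_INTERVAL = 60                              # poll once per minute (assumed)


def needs_refresh() -> bool:
    """Return True when the stored token is missing or close to expiry."""
    if not TOKEN_FILE.exists():
        return True
    expires_at = json.loads(TOKEN_FILE.read_text()).get("expires_at", 0)
    return time.time() >= expires_at - REFRESH_MARGIN


def main() -> None:
    while True:
        if needs_refresh():
            # Re-run the existing OAuth helper; --force skips its "still valid" check.
            subprocess.run(
                ["python", "credentials-provider/oauth/ingress_oauth.py", "--force"],
                check=False,
            )
        time.sleep(CHECK_INTERVAL)


if __name__ == "__main__":
    main()
```

Run detached under a process manager; the new `token_refresher.pid` and `token_refresher.log` entries in `.gitignore` suggest the real service writes a pidfile and log in a similar way.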

70 changes: 65 additions & 5 deletions agents/agent.py
@@ -771,6 +771,8 @@ def print_agent_response(response_dict: Dict[str, Any], verbose: bool = False) -
response_dict: Dictionary containing the agent response with 'messages' key
verbose: Whether to show detailed debug information
"""
# Debug: Log entry to function
logger.debug(f"print_agent_response called with verbose={verbose}, response_dict keys: {response_dict.keys() if response_dict else 'None'}")
if verbose:
# Define ANSI color codes for different message types
COLORS = {
@@ -844,17 +846,73 @@ def print_agent_response(response_dict: Dict[str, Any], verbose: bool = False) -
logger.info(f"{'=' * 20} END OF {msg_type} MESSAGE #{i} {'=' * 20}{reset}")
logger.info("")

# Always show the final AI response
# Always show the final AI response (both in verbose and non-verbose mode)
# This section runs regardless of verbose flag
if not verbose:
logger.info("=== Attempting to print final response (non-verbose mode) ===")

if response_dict and "messages" in response_dict and response_dict["messages"]:
# Debug: Log that we're looking for the final AI message
if not verbose:
logger.info(f"Found {len(response_dict['messages'])} messages in response")

# Get the last AI message from the response
for message in reversed(response_dict["messages"]):
message_type = type(message).__name__
if "AIMessage" in message_type or "ai" in str(message).lower():
if isinstance(message, dict) and "content" in message:
print("\n" + message["content"])

# Debug logging in non-verbose mode to understand what's happening
if not verbose:
logger.debug(f"Checking message type: {message_type}")

# Check if this is an AI message
if "AIMessage" in message_type or "ai" in str(type(message)).lower():
# Extract and print the content
content = None

# Try different ways to extract content
if hasattr(message, 'content'):
content = message.content
elif isinstance(message, dict) and "content" in message:
content = message["content"]
else:
print("\n" + str(message.content))
# Try to extract content from string representation as last resort
try:
content = str(message)
except:
content = None

# Print the content if we found any
if content:
# Force print the final response regardless of any conditions
print("\n" + str(content), flush=True)

if not verbose:
logger.info(f"Final AI Response printed (length: {len(str(content))} chars)")
else:
if not verbose:
logger.warning(f"AI message found but no content extracted. Message type: {message_type}, Message attrs: {dir(message) if hasattr(message, '__dict__') else 'N/A'}")

# We found an AI message, stop looking
break
else:
# No AI message found - try to print the last message regardless
if not verbose:
logger.warning("No AI message found in response, attempting to print last message")
logger.debug(f"Messages in response: {[type(m).__name__ for m in response_dict['messages']]}")

# As a fallback, print the last message if it has content
if response_dict["messages"]:
last_message = response_dict["messages"][-1]
content = None

if hasattr(last_message, 'content'):
content = last_message.content
elif isinstance(last_message, dict) and "content" in last_message:
content = last_message["content"]

if content:
print("\n[Response]\n" + str(content), flush=True)
logger.info(f"Printed last message as fallback (type: {type(last_message).__name__})")


class InteractiveAgent:
@@ -1232,6 +1290,8 @@ async def main():
if not args.interactive:
# Single-turn mode - just show the response and exit
logger.info("\nResponse:" + "\n" + "-"*40)
logger.debug(f"Calling print_agent_response with verbose={args.verbose}")
logger.debug(f"Response has {len(response.get('messages', []))} messages")
print_agent_response(response, args.verbose)
return
else:
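
The reworked `print_agent_response` above extracts the reply via `message.content`, then a `"content"` dict key, then `str(message)`, and falls back to printing the last message when no AI message is detected. A small hypothetical call showing those paths (the payloads are invented for illustration):

```python
# Hypothetical invocation; assumes the agents package is importable as shown.
from agents.agent import print_agent_response

response = {
    "messages": [
        {"type": "human", "content": "List the registered MCP servers."},
        {"type": "ai", "content": "There are 12 registered servers."},
    ]
}

# Plain dicts are not recognized by the AIMessage type check, so this call lands
# in the fallback branch and prints the last message's "content" under a
# "[Response]" header, with the extra non-verbose info logs added in this PR.
print_agent_response(response, verbose=False)
```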
93 changes: 16 additions & 77 deletions build_and_run.sh
@@ -14,65 +14,6 @@ handle_error() {
exit 1
}

# Update auth tokens from .oauth-tokens files
update_auth_tokens() {
log "Updating auth tokens from .oauth-tokens directory..."

OAUTH_TOKENS_DIR="$PWD/.oauth-tokens"
if [ ! -d "$OAUTH_TOKENS_DIR" ]; then
log "No .oauth-tokens directory found at $OAUTH_TOKENS_DIR - skipping token updates"
return
fi

# Create backup of .env (overwrite previous backup)
cp .env .env.backup

# Process each egress.json file
for egress_file in "$OAUTH_TOKENS_DIR"/*egress.json; do
if [ ! -f "$egress_file" ]; then
continue
fi

# Extract server name from filename (remove egress.json suffix)
filename=$(basename "$egress_file")
server_name=$(echo "$filename" | sed 's/egress\.json$//')

# Map specific server names to expected environment variable names
case "$server_name" in
"atlassian-atlassian-")
env_var_name="ATLASSIAN_AUTH_TOKEN"
;;
"bedrock-agentcore-sre-gateway-")
env_var_name="SRE_GATEWAY_AUTH_TOKEN"
;;
*)
# Generic conversion: convert to uppercase and replace hyphens with underscores
env_var_name=$(echo "${server_name}" | sed 's/-$//' | tr '[:lower:]' '[:upper:]' | tr '-' '_')_AUTH_TOKEN
;;
esac

# Extract access_token from JSON file
if command -v jq &> /dev/null; then
access_token=$(jq -r '.access_token // empty' "$egress_file" 2>/dev/null)
else
# Fallback method without jq
access_token=$(grep -o '"access_token"[[:space:]]*:[[:space:]]*"[^"]*"' "$egress_file" | sed 's/.*"access_token"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')
fi

if [ -n "$access_token" ] && [ "$access_token" != "null" ]; then
log "Found token for $server_name -> $env_var_name"

# Remove existing token line if present
sed -i "/^${env_var_name}=/d" .env

# Add new token
echo "${env_var_name}=\"${access_token}\"" >> .env
log "✓ Updated $env_var_name in .env"
else
log "⚠ No valid access_token found in $egress_file"
fi
done
}

log "Starting MCP Gateway Docker Compose deployment script"

@@ -131,9 +72,6 @@ fi

log "Found .env file"

# Update auth tokens from .oauth-tokens files
update_auth_tokens

# Check if docker-compose is installed
if ! command -v docker-compose &> /dev/null; then
log "ERROR: docker-compose is not installed"
@@ -186,9 +124,9 @@ if [ -d "registry/servers" ]; then

# Verify atlassian.json was copied
if [ -f "$MCPGATEWAY_SERVERS_DIR/atlassian.json" ]; then
log "atlassian.json copied successfully"
log "atlassian.json copied successfully"
else
log " atlassian.json not found in copied files"
log "WARNING: atlassian.json not found in copied files"
fi
else
log "No JSON files found in registry/servers"
Expand All @@ -206,7 +144,7 @@ if [ -f "auth_server/scopes.yml" ]; then

# Copy scopes.yml
sudo cp auth_server/scopes.yml "$AUTH_SERVER_DIR/"
log "scopes.yml copied successfully to $AUTH_SERVER_DIR"
log "scopes.yml copied successfully to $AUTH_SERVER_DIR"
else
log "WARNING: auth_server/scopes.yml not found"
fi
@@ -258,46 +196,46 @@ log "Verifying services are healthy..."

# Check registry service
if curl -f http://localhost:7860/health &>/dev/null; then
log "Registry service is healthy"
log "Registry service is healthy"
else
log " Registry service may still be starting up..."
log "WARNING: Registry service may still be starting up..."
fi

# Check auth service
if curl -f http://localhost:8888/health &>/dev/null; then
log "Auth service is healthy"
log "Auth service is healthy"
else
log " Auth service may still be starting up..."
log "WARNING: Auth service may still be starting up..."
fi

# Check nginx is responding
if curl -f http://localhost:80 &>/dev/null || curl -k -f https://localhost:443 &>/dev/null; then
log "Nginx is responding"
log "Nginx is responding"
else
log " Nginx may still be starting up..."
log "WARNING: Nginx may still be starting up..."
fi

# Verify FAISS index creation
log "Verifying FAISS index creation..."
sleep 5 # Give registry service time to create the index

if [ -f "$MCPGATEWAY_SERVERS_DIR/service_index.faiss" ]; then
log "FAISS index created successfully at $MCPGATEWAY_SERVERS_DIR/service_index.faiss"
log "FAISS index created successfully at $MCPGATEWAY_SERVERS_DIR/service_index.faiss"

# Check if metadata file also exists
if [ -f "$MCPGATEWAY_SERVERS_DIR/service_index_metadata.json" ]; then
log "FAISS index metadata created successfully"
log "FAISS index metadata created successfully"
else
log " FAISS index metadata file not found"
log "WARNING: FAISS index metadata file not found"
fi
else
log " FAISS index not yet created. The registry service will create it on first access."
log "WARNING: FAISS index not yet created. The registry service will create it on first access."
fi

# Verify server list includes Atlassian
log "Verifying server list..."
if [ -f "$MCPGATEWAY_SERVERS_DIR/atlassian.json" ]; then
log "Atlassian server configuration present"
log "Atlassian server configuration present"
fi

# List all available server JSON files
@@ -306,9 +244,10 @@ if ls "$MCPGATEWAY_SERVERS_DIR"/*.json 2>/dev/null | head -n 10; then
TOTAL_SERVERS=$(ls "$MCPGATEWAY_SERVERS_DIR"/*.json 2>/dev/null | wc -l)
log "Total server configurations: $TOTAL_SERVERS"
else
log " No server configurations found in $MCPGATEWAY_SERVERS_DIR"
log "WARNING: No server configurations found in $MCPGATEWAY_SERVERS_DIR"
fi


log "Deployment completed successfully"
log ""
log "Services are available at:"
12 changes: 6 additions & 6 deletions credentials-provider/oauth/ingress_oauth.py
@@ -168,7 +168,7 @@ def _perform_m2m_authentication(
"region": region
}

logger.info("🎉 M2M token obtained successfully!")
logger.info("M2M token obtained successfully!")

if expires_at:
expires_in = int(expires_at - time.time())
@@ -214,7 +214,7 @@ def _save_ingress_tokens(token_data: Dict[str, Any]) -> str:

# Secure the file
ingress_path.chmod(0o600)
logger.info(f"📁 Saved ingress tokens to: {ingress_path}")
logger.info(f"Saved ingress tokens to: {ingress_path}")

return str(ingress_path)

@@ -290,7 +290,7 @@ def main() -> int:
user_pool_id = os.getenv("INGRESS_OAUTH_USER_POOL_ID")
region = os.getenv("AWS_REGION", "us-east-1")

logger.info("🔐 Starting INGRESS OAuth authentication (Cognito M2M)")
logger.info("Starting INGRESS OAuth authentication (Cognito M2M)")
logger.info(f"User Pool ID: {user_pool_id}")
logger.info(f"Client ID: {client_id[:10]}...")
logger.info(f"Region: {region}")
@@ -299,7 +299,7 @@ def main() -> int:
if not args.force:
existing_tokens = _load_existing_tokens()
if existing_tokens:
logger.info("Using existing valid ingress token")
logger.info("Using existing valid ingress token")
logger.info(f"Token expires at: {existing_tokens.get('expires_at_human', 'Unknown')}")
return 0

@@ -314,13 +314,13 @@ def main() -> int:
# Save tokens
saved_path = _save_ingress_tokens(token_data)

logger.info("INGRESS OAuth authentication completed successfully!")
logger.info("INGRESS OAuth authentication completed successfully!")
logger.info(f"Tokens saved to: {saved_path}")

return 0

except Exception as e:
logger.error(f" INGRESS OAuth authentication failed: {e}")
logger.error(f"ERROR: INGRESS OAuth authentication failed: {e}")
if args.verbose:
import traceback
logger.error(traceback.format_exc())
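
For orientation, the ingress flow shown here — reuse a still-valid token unless `--force` is passed, otherwise perform the Cognito M2M exchange and persist it — is a standard OAuth 2.0 client-credentials grant. A hypothetical sketch of that exchange follows; the Cognito domain, client credentials, and timeout are placeholders, and the repository's `_perform_m2m_authentication()` may differ in detail:

```python
# Hypothetical client-credentials exchange against a Cognito user pool domain.
# COGNITO_DOMAIN, CLIENT_ID, and CLIENT_SECRET are placeholders, not values from
# this repository.
import time
import requests

COGNITO_DOMAIN = "https://example-domain.auth.us-east-1.amazoncognito.com"
CLIENT_ID = "your-app-client-id"
CLIENT_SECRET = "your-app-client-secret"


def fetch_m2m_token() -> dict:
    resp = requests.post(
        f"{COGNITO_DOMAIN}/oauth2/token",
        auth=(CLIENT_ID, CLIENT_SECRET),      # HTTP Basic client authentication
        data={"grant_type": "client_credentials"},
        timeout=30,
    )
    resp.raise_for_status()
    token = resp.json()                       # access_token, expires_in, token_type
    # Record an absolute expiry so callers can decide when to refresh.
    token["expires_at"] = time.time() + token.get("expires_in", 3600)
    return token
```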