Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
113 changes: 50 additions & 63 deletions AntiPattern_Remediator/main.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,7 @@

"""
Main entry point - Legacy Code Migration Tool
"""
from config.settings import initialize_settings
# from scripts import seed_database
from dotenv import load_dotenv
load_dotenv()
from colorama import Fore, Style
Expand All @@ -12,11 +10,34 @@
from full_repo_workflow import run_full_repo_workflow


def run_code_snippet_workflow(settings, db_manager, prompt_manager, langgraph):
"""Run the original workflow with a hardcoded Java code snippet."""
print(Fore.BLUE + "\n=== Code Snippet Analysis Workflow ===" + Style.RESET_ALL)
print("Analyzing the provided Java code snippet...")

def main():
"""Main function: Run antipattern analysis"""

# Let user select provider
print("Available providers: 1) ollama 2) ibm 3) vllm")
choice = input("Select provider (1-3): ").strip()

provider_map = {"1": "ollama", "2": "ibm", "3": "vllm"}
provider = provider_map.get(choice, "ollama") # default to ollama

# Let us choose which DB to interact with
print("Choose your trove: 1) ChromaDB (VectorDB) 2) TinyDB (DocumentDB)")
db_choice = input("Choose 1 or 2: ").strip()

# Initialize global settings with selected provider
settings = initialize_settings(provider)
print(Fore.GREEN + f"Using {settings.LLM_PROVIDER} with model {settings.LLM_MODEL}" + Style.RESET_ALL)

# Temporary Lazy Imports
from src.core.graph import CreateGraph
from src.data.database import VectorDBManager, TinyDBManager
from src.core.prompt import PromptManager
from scripts import seed_database

# Initialize PromptManager
print("Initializing PromptManager...")
prompt_manager = PromptManager()

# Example Java code
legacy_code = """
public class ApplicationManager {
Expand Down Expand Up @@ -55,7 +76,8 @@ def run_code_snippet_workflow(settings, db_manager, prompt_manager, langgraph):
}
}
"""


# Initial workflow state
initial_state = {
"code": legacy_code,
"context": None,
Expand All @@ -66,58 +88,12 @@ def run_code_snippet_workflow(settings, db_manager, prompt_manager, langgraph):
"code_review_results": None,
"code_review_times": 0,
"msgs": [],
"answer": None
}

final_state = langgraph.invoke(initial_state)
"answer": None,

print(Fore.GREEN + f"\nAnalysis Complete!" + Style.RESET_ALL)
print(f"Final state keys: {list(final_state.keys())}")
print(f"Context retrieved: {'Yes' if final_state.get('context') else 'No'}")
print(f"Analysis completed: {'Yes' if final_state.get('antipatterns_scanner_results') else 'No'}")
print(f"Refactored code: {'Yes' if final_state.get('refactored_code') else 'No'}")
print(f"Code review results: {final_state.get('code_review_times')}")


def main():
"""Main function: Choose between code snippet analysis or full repository run"""

print(Fore.BLUE + "=== AntiPattern Remediator Tool ===" + Style.RESET_ALL)
print("Choose your analysis mode:")
print("1) Code Snippet Analysis - Analyze a sample Java code snippet")
print("2) Full Repository Run - Process files with 100% test coverage from JaCoCo results")

# Let user choose analysis mode
mode_choice = input("\nSelect mode (1-2): ").strip()

if mode_choice not in ["1", "2"]:
print(Fore.RED + "Invalid choice. Defaulting to Code Snippet Analysis." + Style.RESET_ALL)
mode_choice = "1"

# Let user select provider
print("\nAvailable providers: 1) ollama 2) ibm 3) vllm")
choice = input("Select provider (1-3): ").strip()

provider_map = {"1": "ollama", "2": "ibm", "3": "vllm"}
provider = provider_map.get(choice, "ollama") # default to ollama

# Let us choose which DB to interact with
print("Choose your trove: 1) ChromaDB (VectorDB) 2) TinyDB (DocumentDB)")
db_choice = input("Choose 1 or 2: ").strip()

# Initialize global settings with selected provider
settings = initialize_settings(provider)
print(Fore.GREEN + f"Using {settings.LLM_PROVIDER} with model {settings.LLM_MODEL}" + Style.RESET_ALL)

# Temporary Lazy Imports
from src.core.graph import CreateGraph
from src.data.database import VectorDBManager, TinyDBManager
from src.core.prompt import PromptManager
from scripts import seed_database

# Initialize PromptManager
print("Initializing PromptManager...")
prompt_manager = PromptManager()
# ExplainerAgent fields
"explanation_response_raw": None,
"explanation_json": None,
}

# Setup Database
if db_choice == "2":
Expand All @@ -133,11 +109,22 @@ def main():
retriever = db_manager.as_retriever()
langgraph = CreateGraph(db_manager, prompt_manager, retriever=retriever).workflow

# Run the selected workflow
if mode_choice == "1":
run_code_snippet_workflow(settings, db_manager, prompt_manager, langgraph)
# Final results summary
print(Fore.GREEN + f"\nAnalysis Complete!" + Style.RESET_ALL)
print(f"Final state keys: {list(final_state.keys())}")
print(f"Context retrieved: {'Yes' if final_state.get('context') else 'No'}")
print(f"Analysis completed: {'Yes' if final_state.get('antipatterns_scanner_results') else 'No'}")
print(f"Refactored code: {'Yes' if final_state.get('refactored_code') else 'No'}")
print(f"Code review results: {final_state.get('code_review_times')}")

# Show explanation from ExplainerAgent
if final_state.get("explanation_json"):
import json
print(Fore.CYAN + "\n=== Explanation (JSON) ===" + Style.RESET_ALL)
print(json.dumps(final_state["explanation_json"], indent=2, ensure_ascii=False))
else:
run_full_repo_workflow(settings, db_manager, prompt_manager, langgraph)
print(Fore.RED + "\nNo explanation was generated." + Style.RESET_ALL)


if __name__ == "__main__":
main()
4 changes: 3 additions & 1 deletion AntiPattern_Remediator/src/core/agents/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,12 @@
from .refactor_strategist import RefactorStrategist
from .code_transformer import CodeTransformer
from .code_reviewer import CodeReviewerAgent
from .explainer import ExplainerAgent

__all__ = [
"AntipatternScanner",
"RefactorStrategist",
"CodeTransformer",
"CodeReviewerAgent"
"CodeReviewerAgent",
"ExplainerAgent"
]
131 changes: 131 additions & 0 deletions AntiPattern_Remediator/src/core/agents/explainer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
"""
ExplainerAgent — minimal version
- Delegates state handling to create_graph.py
- Uses PromptManager if available; otherwise a tiny inline fallback prompt
- Always passes msgs; always returns a non-empty explanation_json
"""
from __future__ import annotations

from typing import Dict, Any
import json

from langchain_core.language_models import BaseLanguageModel
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from ..prompt import PromptManager
from src.core.utils import extract_first_json

PROMPT_KEY = "explainer"


class ExplainerAgent:
    """Produce a structured JSON explanation of detected antipatterns and the refactor.

    State handling is delegated to the graph (create_graph.py); this agent reads and
    writes plain dict keys on the shared state:

    reads  : code, language, context, refactored_code, refactor_rationale,
             antipattern_name, antipattern_description, antipatterns_json, msgs
    writes : explanation_response_raw (raw LLM text),
             explanation_json (always a non-empty dict; see explain_antipattern)
    """

    def __init__(self, llm: BaseLanguageModel, prompt_manager: PromptManager):
        self.llm = llm
        self.prompt_manager = prompt_manager

    def explain_antipattern(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Generate explanation JSON for detected antipatterns and refactor.

        Guarantees that state["explanation_json"] is a non-empty dict on return:
        a parsed dict is used as-is, a parsed list is wrapped under "items", and
        anything else (parse failure, LLM error) falls back to a minimal payload.
        """
        kwargs = dict(
            code=state.get("code", ""),
            language=state.get("language", "Java"),
            context=state.get("context", ""),
            refactored_code=state.get("refactored_code", ""),
            refactor_rationale=state.get("refactor_rationale", ""),
            antipattern_name=state.get("antipattern_name", "Unknown antipattern"),
            antipattern_description=state.get("antipattern_description", ""),
            antipatterns_json=json.dumps(state.get("antipatterns_json", []), ensure_ascii=False),
            msgs=state.get("msgs", []),  # ensure MessagesPlaceholder is satisfied
        )

        messages = self._build_messages(**kwargs)

        try:
            response = self.llm.invoke(messages)
            raw = getattr(response, "content", None) or str(response)
        except Exception as e:
            # Keep the pipeline alive on LLM failure; the fallback payload
            # below guarantees downstream nodes still get a usable dict.
            raw = f"LLM error: {e}"

        state["explanation_response_raw"] = raw

        # Robust parse: accept dict, wrap list, or emit a minimal fallback
        try:
            parsed = extract_first_json(raw)
        except Exception:
            parsed = None

        if isinstance(parsed, dict):
            state["explanation_json"] = parsed
        elif isinstance(parsed, list):
            state["explanation_json"] = {"items": parsed}
        else:
            state["explanation_json"] = self._fallback_payload(state)

        return state

    def display_explanation(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Print the raw LLM response and, when present, the parsed JSON."""
        print("\n=== Explanation (raw) ===\n", state.get("explanation_response_raw", "N/A"))
        if state.get("explanation_json"):
            print("\n=== Explanation (JSON) ===\n",
                  json.dumps(state["explanation_json"], indent=2, ensure_ascii=False))
        return state

    def _build_messages(self, **kwargs) -> Any:
        """Build the chat messages, preferring the PromptManager template.

        Falls back to a minimal inline prompt when no "explainer" template is
        registered. Returns formatted messages ready for llm.invoke().
        """
        # Always ensure msgs exists so MessagesPlaceholder never raises.
        if "msgs" not in kwargs or kwargs["msgs"] is None:
            kwargs = {**kwargs, "msgs": []}

        # 1) Try preloaded template from PromptManager (duck-typed: the manager
        #    may not expose get_prompt, or may return None for this key).
        getp = getattr(self.prompt_manager, "get_prompt", None)
        if callable(getp):
            prompt = getp(PROMPT_KEY)
            if prompt is not None:
                return prompt.format_messages(**kwargs)

        # 2) Minimal inline fallback
        schema = {
            "items": [{
                "antipattern_name": "",
                "antipattern_description": "",
                "impact": "",
                "why_it_is_bad": "",
                "how_we_fixed_it": "",
                "refactored_code": "",
                "summary": ""
            }],
            "what_changed": [],
            "why_better": [],
            "principles_applied": [],
            "trade_offs": [],
            "closing_summary": ""
        }
        content = (
            "Given inputs (JSON):\n"
            + json.dumps({k: v for k, v in kwargs.items() if k != "msgs"}, ensure_ascii=False)
            + "\nRespond with STRICT JSON using exactly this schema:\n"
            + json.dumps(schema, ensure_ascii=False)
        )
        # BUG FIX: ChatPromptTemplate uses f-string templating, so every literal
        # "{" / "}" in the message text is treated as a template variable. The
        # JSON dumped above is full of braces and would make format_messages()
        # raise. Double the braces so the JSON passes through verbatim.
        content = content.replace("{", "{{").replace("}", "}}")
        fallback = ChatPromptTemplate.from_messages([
            ("system", "Return STRICT JSON only. No commentary."),
            ("user", content),
            MessagesPlaceholder("msgs"),
        ])
        return fallback.format_messages(**kwargs)

    @staticmethod
    def _fallback_payload(state: Dict[str, Any]) -> Dict[str, Any]:
        """Tiny fallback so downstream never breaks if parsing fails."""
        return {
            "items": [{
                "antipattern_name": state.get("antipattern_name", "Unknown antipattern"),
                "antipattern_description": state.get("antipattern_description", ""),
                "impact": "",
                "why_it_is_bad": "",
                "how_we_fixed_it": state.get("refactor_rationale", ""),
                "refactored_code": state.get("refactored_code", ""),
                "summary": "Auto-generated minimal explanation (parser fallback)."
            }],
            "what_changed": [],
            "why_better": [],
            "principles_applied": [],
            "trade_offs": [],
            "closing_summary": ""
        }
13 changes: 10 additions & 3 deletions AntiPattern_Remediator/src/core/graph/create_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from ..agents import RefactorStrategist
from ..agents import CodeTransformer
from ..agents import CodeReviewerAgent
from ..agents import ExplainerAgent

# Imports for LangSmith tracing
import os
Expand Down Expand Up @@ -62,7 +63,7 @@ def __init__(self, db_manager, prompt_manager: PromptManager, retriever=None, ll
self.prompt_manager = prompt_manager
self.conditional_edges = ConditionalEdges()

# assign the instance attribute before use
# assign the instance attribute before use
self.retriever = retriever or self.db_manager.as_retriever()

retriever_tool = create_retriever_tool(
Expand All @@ -78,13 +79,13 @@ def __init__(self, db_manager, prompt_manager: PromptManager, retriever=None, ll
"strategist": RefactorStrategist(self.llm, self.prompt_manager, retriever=self.retriever),
"transformer": CodeTransformer(self.llm, self.prompt_manager),
"reviewer": CodeReviewerAgent(self.llm, self.prompt_manager),
"explainer": ExplainerAgent(self.llm, self.prompt_manager)
}

# Build the LangGraph workflow
self.workflow = self._build_graph()

def _build_graph(self):
"""Build LangGraph workflow"""
graph = StateGraph(AgentState)

# Scanner: retrieve + analyze
Expand All @@ -104,6 +105,10 @@ def _build_graph(self):
graph.add_node("review_code", self.agents["reviewer"].review_code)
graph.add_node("display_code_review_results", self.agents["reviewer"].display_code_review_results)

# Explainer: final storytelling
graph.add_node("explain_antipattern", self.agents["explainer"].explain_antipattern)
graph.add_node("display_explanation", self.agents["explainer"].display_explanation)

# Topology
graph.set_entry_point("retrieve_context")
graph.add_edge("retrieve_context", "analyze_antipatterns")
Expand All @@ -123,6 +128,8 @@ def _build_graph(self):
},
)

graph.add_edge("display_code_review_results", END)
graph.add_edge("display_code_review_results", "explain_antipattern")
graph.add_edge("explain_antipattern", "display_explanation")
graph.add_edge("display_explanation", END)

return graph.compile()
Loading