
Commit ce78668

Merge pull request #28 from Andrei-Constantin-Programmer/IBM-21-Explainer_Agent
Ibm 21 explainer agent
2 parents 4b2c453 + c3677e2 commit ce78668

File tree

11 files changed: +321 -98 lines changed


AntiPattern_Remediator/main.py

Lines changed: 51 additions & 63 deletions
@@ -1,9 +1,7 @@
-
 """
 Main entry point - Legacy Code Migration Tool
 """
 from config.settings import initialize_settings
-# from scripts import seed_database
 from dotenv import load_dotenv
 load_dotenv()
 from colorama import Fore, Style
@@ -12,11 +10,35 @@
 from full_repo_workflow import run_full_repo_workflow


-def run_code_snippet_workflow(settings, db_manager, prompt_manager, langgraph):
-    """Run the original workflow with a hardcoded Java code snippet."""
-    print(Fore.BLUE + "\n=== Code Snippet Analysis Workflow ===" + Style.RESET_ALL)
-    print("Analyzing the provided Java code snippet...")
-
+
+def main():
+    """Main function: Run antipattern analysis"""
+
+    # Let user select provider
+    print("Available providers: 1) ollama 2) ibm 3) vllm")
+    choice = input("Select provider (1-3): ").strip()
+
+    provider_map = {"1": "ollama", "2": "ibm", "3": "vllm"}
+    provider = provider_map.get(choice, "ollama") # default to ollama
+
+    # Let us choose which DB to interact with
+    print("Choose your trove: 1) ChromaDB (VectorDB) 2) TinyDB (DocumentDB)")
+    db_choice = input("Choose 1 or 2: ").strip()
+
+    # Initialize global settings with selected provider
+    settings = initialize_settings(provider)
+    print(Fore.GREEN + f"Using {settings.LLM_PROVIDER} with model {settings.LLM_MODEL}" + Style.RESET_ALL)
+
+    # Temporary Lazy Imports
+    from src.core.graph import CreateGraph
+    from src.data.database import VectorDBManager, TinyDBManager
+    from src.core.prompt import PromptManager
+    from scripts import seed_database
+
+    # Initialize PromptManager
+    print("Initializing PromptManager...")
+    prompt_manager = PromptManager()
+
     # Example Java code
     legacy_code = """
     public class ApplicationManager {
@@ -55,7 +77,8 @@ def run_code_snippet_workflow(settings, db_manager, prompt_manager, langgraph):
         }
     }
     """
-
+
+    # Initial workflow state
     initial_state = {
         "code": legacy_code,
         "context": None,
@@ -66,58 +89,12 @@ def run_code_snippet_workflow(settings, db_manager, prompt_manager, langgraph):
         "code_review_results": None,
         "code_review_times": 0,
         "msgs": [],
-        "answer": None
-    }
-
-    final_state = langgraph.invoke(initial_state)
-
-    print(Fore.GREEN + f"\nAnalysis Complete!" + Style.RESET_ALL)
-    print(f"Final state keys: {list(final_state.keys())}")
-    print(f"Context retrieved: {'Yes' if final_state.get('context') else 'No'}")
-    print(f"Analysis completed: {'Yes' if final_state.get('antipatterns_scanner_results') else 'No'}")
-    print(f"Refactored code: {'Yes' if final_state.get('refactored_code') else 'No'}")
-    print(f"Code review results: {final_state.get('code_review_times')}")
-
-
-def main():
-    """Main function: Choose between code snippet analysis or full repository run"""
-
-    print(Fore.BLUE + "=== AntiPattern Remediator Tool ===" + Style.RESET_ALL)
-    print("Choose your analysis mode:")
-    print("1) Code Snippet Analysis - Analyze a sample Java code snippet")
-    print("2) Full Repository Run - Process files with 100% test coverage from JaCoCo results")
-
-    # Let user choose analysis mode
-    mode_choice = input("\nSelect mode (1-2): ").strip()
-
-    if mode_choice not in ["1", "2"]:
-        print(Fore.RED + "Invalid choice. Defaulting to Code Snippet Analysis." + Style.RESET_ALL)
-        mode_choice = "1"
-
-    # Let user select provider
-    print("\nAvailable providers: 1) ollama 2) ibm 3) vllm")
-    choice = input("Select provider (1-3): ").strip()
-
-    provider_map = {"1": "ollama", "2": "ibm", "3": "vllm"}
-    provider = provider_map.get(choice, "ollama") # default to ollama
-
-    # Let us choose which DB to interact with
-    print("Choose your trove: 1) ChromaDB (VectorDB) 2) TinyDB (DocumentDB)")
-    db_choice = input("Choose 1 or 2: ").strip()
-
-    # Initialize global settings with selected provider
-    settings = initialize_settings(provider)
-    print(Fore.GREEN + f"Using {settings.LLM_PROVIDER} with model {settings.LLM_MODEL}" + Style.RESET_ALL)
+        "answer": None,

-    # Temporary Lazy Imports
-    from src.core.graph import CreateGraph
-    from src.data.database import VectorDBManager, TinyDBManager
-    from src.core.prompt import PromptManager
-    from scripts import seed_database
-
-    # Initialize PromptManager
-    print("Initializing PromptManager...")
-    prompt_manager = PromptManager()
+        # ExplainerAgent fields
+        "explanation_response_raw": None,
+        "explanation_json": None,
+    }

     # Setup Database
     if db_choice == "2":
@@ -133,11 +110,22 @@ def main():
     retriever = db_manager.as_retriever()
     langgraph = CreateGraph(db_manager, prompt_manager, retriever=retriever).workflow

-    # Run the selected workflow
-    if mode_choice == "1":
-        run_code_snippet_workflow(settings, db_manager, prompt_manager, langgraph)
+    # Final results summary
+    print(Fore.GREEN + f"\nAnalysis Complete!" + Style.RESET_ALL)
+    print(f"Final state keys: {list(final_state.keys())}")
+    print(f"Context retrieved: {'Yes' if final_state.get('context') else 'No'}")
+    print(f"Analysis completed: {'Yes' if final_state.get('antipatterns_scanner_results') else 'No'}")
+    print(f"Refactored code: {'Yes' if final_state.get('refactored_code') else 'No'}")
+    print(f"Code review results: {final_state.get('code_review_times')}")
+
+    # Show explanation from ExplainerAgent
+    if final_state.get("explanation_json"):
+        import json
+        print(Fore.CYAN + "\n=== Explanation (JSON) ===" + Style.RESET_ALL)
+        print(json.dumps(final_state["explanation_json"], indent=2, ensure_ascii=False))
     else:
-        run_full_repo_workflow(settings, db_manager, prompt_manager, langgraph)
+        print(Fore.RED + "\nNo explanation was generated." + Style.RESET_ALL)
+

 if __name__ == "__main__":
     main()

AntiPattern_Remediator/src/core/agents/__init__.py

Lines changed: 3 additions & 1 deletion
@@ -7,10 +7,12 @@
 from .refactor_strategist import RefactorStrategist
 from .code_transformer import CodeTransformer
 from .code_reviewer import CodeReviewerAgent
+from .explainer import ExplainerAgent

 __all__ = [
     "AntipatternScanner",
     "RefactorStrategist",
     "CodeTransformer",
-    "CodeReviewerAgent"
+    "CodeReviewerAgent",
+    "ExplainerAgent"
 ]
AntiPattern_Remediator/src/core/agents/explainer.py

Lines changed: 131 additions & 0 deletions
@@ -0,0 +1,131 @@
+"""
+ExplainerAgent — minimal version
+- Delegates state handling to create_graph.py
+- Uses PromptManager if available; otherwise a tiny inline fallback prompt
+- Always passes msgs; always returns a non-empty explanation_json
+"""
+from __future__ import annotations
+
+from typing import Dict, Any
+import json
+
+from langchain_core.language_models import BaseLanguageModel
+from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
+from ..prompt import PromptManager
+from src.core.utils import extract_first_json
+
+PROMPT_KEY = "explainer"
+
+
+class ExplainerAgent:
+    def __init__(self, llm: BaseLanguageModel, prompt_manager: PromptManager):
+        self.llm = llm
+        self.prompt_manager = prompt_manager
+
+    def explain_antipattern(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        """Generate explanation JSON for detected antipatterns and refactor."""
+        kwargs = dict(
+            code=state.get("code", ""),
+            language=state.get("language", "Java"),
+            context=state.get("context", ""),
+            refactored_code=state.get("refactored_code", ""),
+            refactor_rationale=state.get("refactor_rationale", ""),
+            antipattern_name=state.get("antipattern_name", "Unknown antipattern"),
+            antipattern_description=state.get("antipattern_description", ""),
+            antipatterns_json=json.dumps(state.get("antipatterns_json", []), ensure_ascii=False),
+            msgs=state.get("msgs", []), # ensure MessagesPlaceholder is satisfied
+        )
+
+        messages = self._build_messages(**kwargs)
+
+        try:
+            response = self.llm.invoke(messages)
+            raw = getattr(response, "content", None) or str(response)
+        except Exception as e:
+            raw = f"LLM error: {e}"
+
+        state["explanation_response_raw"] = raw
+
+        # Robust parse: accept dict, wrap list, or emit a minimal fallback
+        try:
+            parsed = extract_first_json(raw)
+        except Exception:
+            parsed = None
+
+        if isinstance(parsed, dict):
+            state["explanation_json"] = parsed
+        elif isinstance(parsed, list):
+            state["explanation_json"] = {"items": parsed}
+        else:
+            state["explanation_json"] = self._fallback_payload(state)
+
+        return state
+
+    def display_explanation(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        print("\n=== Explanation (raw) ===\n", state.get("explanation_response_raw", "N/A"))
+        if state.get("explanation_json"):
+            print("\n=== Explanation (JSON) ===\n",
+                  json.dumps(state["explanation_json"], indent=2, ensure_ascii=False))
+        return state
+
+    def _build_messages(self, **kwargs) -> Any:
+        # Always ensure msgs exists
+        if "msgs" not in kwargs or kwargs["msgs"] is None:
+            kwargs = {**kwargs, "msgs": []}
+
+        # 1) Try preloaded template from PromptManager
+        prompt = None
+        getp = getattr(self.prompt_manager, "get_prompt", None)
+        if callable(getp):
+            prompt = getp(PROMPT_KEY)
+        if prompt is not None:
+            return prompt.format_messages(**kwargs)
+
+        # 2) Minimal inline fallback
+        schema = {
+            "items": [{
+                "antipattern_name": "",
+                "antipattern_description": "",
+                "impact": "",
+                "why_it_is_bad": "",
+                "how_we_fixed_it": "",
+                "refactored_code": "",
+                "summary": ""
+            }],
+            "what_changed": [],
+            "why_better": [],
+            "principles_applied": [],
+            "trade_offs": [],
+            "closing_summary": ""
+        }
+        content = (
+            "Given inputs (JSON):\n" + json.dumps({k: v for k, v in kwargs.items() if k != "msgs"}, ensure_ascii=False) +
+            "\nRespond with STRICT JSON using exactly this schema:\n" +
+            json.dumps(schema, ensure_ascii=False)
+        )
+        fallback = ChatPromptTemplate.from_messages([
+            ("system", "Return STRICT JSON only. No commentary."),
+            ("user", content),
+            MessagesPlaceholder("msgs"),
+        ])
+        return fallback.format_messages(**kwargs)
+
+    @staticmethod
+    def _fallback_payload(state: Dict[str, Any]) -> Dict[str, Any]:
+        """Tiny fallback so downstream never breaks if parsing fails."""
+        return {
+            "items": [{
+                "antipattern_name": state.get("antipattern_name", "Unknown antipattern"),
+                "antipattern_description": state.get("antipattern_description", ""),
+                "impact": "",
+                "why_it_is_bad": "",
+                "how_we_fixed_it": state.get("refactor_rationale", ""),
+                "refactored_code": state.get("refactored_code", ""),
+                "summary": "Auto-generated minimal explanation (parser fallback)."
+            }],
+            "what_changed": [],
+            "why_better": [],
+            "principles_applied": [],
+            "trade_offs": [],
+            "closing_summary": ""
+        }
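Note on the parsing step: extract_first_json is imported from src.core.utils, but its implementation is not part of this diff. A minimal sketch, assuming a hypothetical standalone helper, of the behaviour explain_antipattern relies on (return the first JSON object or array embedded in a free-form LLM reply, else raise):

# Hypothetical sketch of src.core.utils.extract_first_json (not in this diff).
import json
import re


def extract_first_json(text: str):
    """Return the first parseable JSON value embedded in `text`, else raise."""
    # Fast path: the whole reply is already valid JSON.
    try:
        return json.loads(text)
    except (TypeError, json.JSONDecodeError):
        pass

    # Otherwise scan for the first '{' or '[' that starts a parseable value.
    decoder = json.JSONDecoder()
    for match in re.finditer(r"[\[{]", text or ""):
        try:
            value, _ = decoder.raw_decode(text, match.start())
            return value
        except json.JSONDecodeError:
            continue

    raise ValueError("No JSON value found in LLM response")

With behaviour like this, a reply such as "Here is the result: {\"items\": []}" yields the dict, which explain_antipattern stores as explanation_json; if nothing parses, the agent falls back to _fallback_payload.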

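The prompt registered under the "explainer" key is likewise not shown in this commit. For _build_messages to use it, PromptManager.get_prompt("explainer") would need to return a ChatPromptTemplate whose input variables match the kwargs the agent builds, plus a MessagesPlaceholder for msgs. A hypothetical sketch, not the project's actual prompt:

# Hypothetical "explainer" prompt; input variables mirror explain_antipattern's kwargs.
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder

explainer_prompt = ChatPromptTemplate.from_messages([
    ("system", "You explain antipattern remediations. Return STRICT JSON only."),
    ("user",
     "Language: {language}\n"
     "Antipattern: {antipattern_name} - {antipattern_description}\n"
     "Detected antipatterns (JSON): {antipatterns_json}\n"
     "Original code:\n{code}\n"
     "Refactored code:\n{refactored_code}\n"
     "Refactor rationale: {refactor_rationale}\n"
     "Retrieved context: {context}"),
    MessagesPlaceholder("msgs"),
])

# Render the messages the agent would send to the LLM (illustrative values).
messages = explainer_prompt.format_messages(
    code="public class Foo { }",
    language="Java",
    context="",
    refactored_code="public final class Foo { }",
    refactor_rationale="Marked the class final.",
    antipattern_name="God Class",
    antipattern_description="A class with too many responsibilities.",
    antipatterns_json="[]",
    msgs=[],
)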
AntiPattern_Remediator/src/core/graph/create_graph.py

Lines changed: 10 additions & 3 deletions
@@ -14,6 +14,7 @@
 from ..agents import RefactorStrategist
 from ..agents import CodeTransformer
 from ..agents import CodeReviewerAgent
+from ..agents import ExplainerAgent

 # Imports for LangSmith tracing
 import os
@@ -62,7 +63,7 @@ def __init__(self, db_manager, prompt_manager: PromptManager, retriever=None, ll
         self.prompt_manager = prompt_manager
         self.conditional_edges = ConditionalEdges()

-        # assign the instance attribute before use
+        # assign the instance attribute before use
         self.retriever = retriever or self.db_manager.as_retriever()

         retriever_tool = create_retriever_tool(
@@ -78,13 +79,13 @@ def __init__(self, db_manager, prompt_manager: PromptManager, retriever=None, ll
             "strategist": RefactorStrategist(self.llm, self.prompt_manager, retriever=self.retriever),
             "transformer": CodeTransformer(self.llm, self.prompt_manager),
             "reviewer": CodeReviewerAgent(self.llm, self.prompt_manager),
+            "explainer": ExplainerAgent(self.llm, self.prompt_manager)
         }

         # Build the LangGraph workflow
         self.workflow = self._build_graph()

     def _build_graph(self):
-        """Build LangGraph workflow"""
         graph = StateGraph(AgentState)

         # Scanner: retrieve + analyze
@@ -104,6 +105,10 @@ def _build_graph(self):
         graph.add_node("review_code", self.agents["reviewer"].review_code)
         graph.add_node("display_code_review_results", self.agents["reviewer"].display_code_review_results)

+        # Explainer: final storytelling
+        graph.add_node("explain_antipattern", self.agents["explainer"].explain_antipattern)
+        graph.add_node("display_explanation", self.agents["explainer"].display_explanation)
+
         # Topology
         graph.set_entry_point("retrieve_context")
         graph.add_edge("retrieve_context", "analyze_antipatterns")
@@ -123,6 +128,8 @@ def _build_graph(self):
             },
         )

-        graph.add_edge("display_code_review_results", END)
+        graph.add_edge("display_code_review_results", "explain_antipattern")
+        graph.add_edge("explain_antipattern", "display_explanation")
+        graph.add_edge("display_explanation", END)

         return graph.compile()
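The two new nodes follow the same contract as the existing ones: each is a callable that receives the shared state dict and returns it. A self-contained toy sketch of the added wiring (explain, then display, then END), assuming a minimal state instead of the project's AgentState:

# Toy illustration of the wiring added above; not the project's AgentState.
from typing import Optional, TypedDict

from langgraph.graph import END, StateGraph


class ToyState(TypedDict):
    explanation_json: Optional[dict]


def explain_antipattern(state: ToyState) -> ToyState:
    # Stand-in for ExplainerAgent.explain_antipattern
    state["explanation_json"] = {"closing_summary": "example explanation"}
    return state


def display_explanation(state: ToyState) -> ToyState:
    # Stand-in for ExplainerAgent.display_explanation
    print(state["explanation_json"])
    return state


graph = StateGraph(ToyState)
graph.add_node("explain_antipattern", explain_antipattern)
graph.add_node("display_explanation", display_explanation)
graph.set_entry_point("explain_antipattern")
graph.add_edge("explain_antipattern", "display_explanation")
graph.add_edge("display_explanation", END)

workflow = graph.compile()
final_state = workflow.invoke({"explanation_json": None})

In the real graph the same pattern is appended after display_code_review_results, which is why only the three add_edge calls at the end of _build_graph change.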

0 commit comments